@gammatech/aijsx 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +21 -0
- package/README.md +224 -0
- package/dist/createElement-Q_LxUYf8.d.ts +159 -0
- package/dist/index.d.ts +72 -0
- package/dist/index.js +859 -0
- package/dist/jsx-runtime.d.ts +29 -0
- package/dist/jsx-runtime.js +63 -0
- package/package.json +75 -0
package/LICENSE.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2023 Fixie.ai
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
# AIJSX
|
|
2
|
+
|
|
3
|
+
> This is forked from [@fixie-ai/ai-jsx](https://github.com/fixie-ai/ai-jsx)
|
|
4
|
+
|
|
5
|
+
**Reasons for forking**
|
|
6
|
+
|
|
7
|
+
- Streamlined Library: We found that the original library, [@fixie-ai/ai-jsx](https://github.com/fixie-ai/ai-jsx), came with an abundance of tools, integrations, models, and features that surpassed our project's requirements. Our fork aims to provide a more focused and simplified version tailored to our needs.
|
|
8
|
+
|
|
9
|
+
- Performance Optimization: The production instance of ai-jsx exhibited performance issues, consuming approximately 30MB of heap memory per stream and displaying slow garbage collection. This behavior led to server restarts. Our fork addresses these concerns, optimizing performance and memory usage.
|
|
10
|
+
|
|
11
|
+
- Tailored Functionality: We identified several features in the original library that were surplus to our project requirements. By eliminating unnecessary functionalities, we aim to create a leaner and faster library.
|
|
12
|
+
|
|
13
|
+
### What is AIJSX?
|
|
14
|
+
|
|
15
|
+
AIJSX is a framework/toolkit designed to facilitate the construction of Large Language Model (LLM) prompts in a composable and ergonomic manner by leveraging JSX.
|
|
16
|
+
|
|
17
|
+
Example:
|
|
18
|
+
|
|
19
|
+
```ts
|
|
20
|
+
import {
|
|
21
|
+
OpenAIChatCompletion,
|
|
22
|
+
SystemMessage,
|
|
23
|
+
UserMessage,
|
|
24
|
+
createRenderContext,
|
|
25
|
+
} from '@gammatech/aijsx'
|
|
26
|
+
|
|
27
|
+
function JokePrompt(props: { input: string }) {
|
|
28
|
+
return (
|
|
29
|
+
<OpenAIChatCompletion>
|
|
30
|
+
<SystemMessage>You are funny</SystemMessage>
|
|
31
|
+
<UserMessage>Tell me a joke about: {props.input}</UserMessage>
|
|
32
|
+
</OpenAIChatCompletion>
|
|
33
|
+
)
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
const result = await createRenderContext().render(
|
|
37
|
+
<JokePrompt input="bananas" />
|
|
38
|
+
)
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
### Why JSX?
|
|
42
|
+
|
|
43
|
+
- JSX facilitates the mixing of string interpolation and control flow logic in a type-safe and familiar manner.
|
|
44
|
+
|
|
45
|
+
- JSX excels in handling composition, allowing the chaining of prompt results to the input of another.
|
|
46
|
+
|
|
47
|
+
- JSX enjoys widespread support across various build systems and does not necessitate a one-off compilation for string-based templating.
|
|
48
|
+
|
|
49
|
+
It's important to note that using JSX is distinct from using React. JSX is compile-time sugar that converts a component into a function call. In contrast, React + ReactDOM is a framework responsible for synchronizing the state of a virtual representation of the DOM with an actual browser DOM. AIJSX leverages JSX more akin to using React.renderToString.
|
|
50
|
+
|
|
51
|
+
### How AIJSX Works to Build Prompts
|
|
52
|
+
|
|
53
|
+
In its simplest form, AIJSX transforms a tree of JSX components into a string. This resulting string can serve as input to an LLM call or be returned via API, offering flexibility in usage. Unlike React, where components may need to respond to updates and state changes, AIJSX follows a render-once-and-done approach.
|
|
54
|
+
|
|
55
|
+
### Differences between AIJSX and React
|
|
56
|
+
|
|
57
|
+
**React's Component Type Signature**
|
|
58
|
+
|
|
59
|
+
```ts
|
|
60
|
+
type Component = (props: P) => React.Element
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
**AIJSX Component Type Signature**
|
|
64
|
+
|
|
65
|
+
```ts
|
|
66
|
+
export interface RenderableStream {
|
|
67
|
+
[Symbol.asyncIterator]: () => AsyncGenerator<string, void, unknown>
|
|
68
|
+
}
|
|
69
|
+
type AINode = Literal | AIElement<any> | AINode[]
|
|
70
|
+
type Renderable = AINode | PromiseLike<Renderable> | RenderableStream
|
|
71
|
+
|
|
72
|
+
type AIComponent = (props: P, context: RenderContext) => Renderable
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
AIComponents in AIJSX can return two additional types: `PromiseLike<Renderable>` and `RenderableStream`. This distinction fundamentally changes the rendering paradigm in AIJSX, introducing asynchronous rendering that can return either a `Promise` or an object with access to an `AsyncGenerator` (`RenderableStream`).
|
|
76
|
+
|
|
77
|
+
The asynchronous render abstraction is powerful, allowing chaining of results from asynchronous operations (such as an LLM API call) to the input of another component.
|
|
78
|
+
|
|
79
|
+
Example:
|
|
80
|
+
|
|
81
|
+
```ts
|
|
82
|
+
const GetLang: AIComponent<{ children: AINode }> = async (
|
|
83
|
+
{ children },
|
|
84
|
+
{ render }
|
|
85
|
+
) => {
|
|
86
|
+
const renderedChildren = await render(children)
|
|
87
|
+
const language = await determineLanguage(renderedChildren)
|
|
88
|
+
|
|
89
|
+
return language
|
|
90
|
+
}
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
You'll notice several differences from normal React components. The second argument to `AIComponent` is the `RenderContext` object, which has a `render` method, allowing us to resolve the value of this components children and use them as inputs to an API call.
|
|
94
|
+
|
|
95
|
+
Because `GetLang` returns `Promise<string>` which matches the signature of a `Renderable` in AIJSX we can use it as an input in another component
|
|
96
|
+
|
|
97
|
+
```ts
|
|
98
|
+
import {
|
|
99
|
+
OpenAIChatCompletion,
|
|
100
|
+
SystemMessage,
|
|
101
|
+
UserMessage,
|
|
102
|
+
createRenderContext,
|
|
103
|
+
} from '@gammatech/aijsx'
|
|
104
|
+
|
|
105
|
+
const WritePoem: AIComponent<{ sentence: string }> = (
|
|
106
|
+
{ sentence },
|
|
107
|
+
{ logger }
|
|
108
|
+
) => {
|
|
109
|
+
return (
|
|
110
|
+
<OpenAIChatCompletion>
|
|
111
|
+
<SystemMessage>You are a world class poet</SystemMessage>
|
|
112
|
+
<UserMessage>
|
|
113
|
+
Write a poem about "{sentence}" in <GetLang>{sentence}</GetLang>
|
|
114
|
+
</UserMessage>
|
|
115
|
+
</OpenAIChatCompletion>
|
|
116
|
+
)
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
const result = await createRenderContext().render(<WritePoem sentence="公共車" />)
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
The above code will first render all of the elements under `<OpenAIChatCompletion>`
|
|
123
|
+
|
|
124
|
+
```ts
|
|
125
|
+
<OpenAIChatCompletion>
|
|
126
|
+
<SystemMessage>You are a world class poet</SystemMessage>
|
|
127
|
+
<UserMessage>
|
|
128
|
+
Write a poem about "公共車" in <GetLang>公共車</GetLang>
|
|
129
|
+
</UserMessage>
|
|
130
|
+
</OpenAIChatCompletion>
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
```ts
|
|
134
|
+
<OpenAIChatCompletion>
|
|
135
|
+
<SystemMessage>You are a world class poet</SystemMessage>
|
|
136
|
+
<UserMessage>Write a poem about "公共車" in Simplified Chinese</UserMessage>
|
|
137
|
+
</OpenAIChatCompletion>
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
# Recipes
|
|
141
|
+
|
|
142
|
+
## Logging
|
|
143
|
+
|
|
144
|
+
`createRenderContext` can take a `logger: LogImplementation` option. This is used as the base logger when the `RenderContext` traverses the component tree and passes `Logger` to each component.
|
|
145
|
+
|
|
146
|
+
At the very least, extenders of `LogImplementation` must implement:
|
|
147
|
+
|
|
148
|
+
```ts
|
|
149
|
+
abstract log(ctx: RenderContext, level: LogLevel, message: string): void
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
## Creating Components
|
|
153
|
+
|
|
154
|
+
**Basic synchronous components**
|
|
155
|
+
|
|
156
|
+
```tsx
|
|
157
|
+
const Text: AIComponent<{ children: AINode }> = ({ children }) => {
|
|
158
|
+
return (
|
|
159
|
+
<>
|
|
160
|
+
{children}
|
|
161
|
+
{'\n'}
|
|
162
|
+
</>
|
|
163
|
+
)
|
|
164
|
+
}
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
**Creating Async Components**
|
|
168
|
+
|
|
169
|
+
```tsx
|
|
170
|
+
const AsyncComponent: AIComponent<{ input: string }> = async ({ input }) => {
|
|
171
|
+
const result = await someApiCall(input)
|
|
172
|
+
return <>result: {result}</>
|
|
173
|
+
}
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
**Creating AsyncGenerator Components**
|
|
177
|
+
|
|
178
|
+
```tsx
|
|
179
|
+
async function* GeneratorComponent(props: { input: string }) {
|
|
180
|
+
yield 'this '
|
|
181
|
+
yield 'is '
|
|
182
|
+
yield 'a '
|
|
183
|
+
yield 'test'
|
|
184
|
+
|
|
185
|
+
// NOTE: AsyncGenerator components in AIJSX do not return values, and it's assumed that
|
|
186
|
+
// their yielded results should be concatenated together as strings
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
async function* GeneratorComponent2(props: { input: string }) {
|
|
190
|
+
return yield* someAsyncGeneratingFunction(props.input)
|
|
191
|
+
}
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
# API Reference
|
|
195
|
+
|
|
196
|
+
### Top Level Methods
|
|
197
|
+
|
|
198
|
+
`createRenderContext` - creates a `RenderContext` used to render a `Renderable`
|
|
199
|
+
|
|
200
|
+
```ts
|
|
201
|
+
export function createRenderContext(opts: {
|
|
202
|
+
logger?: LogImplementation
|
|
203
|
+
rootRenderId?: string
|
|
204
|
+
}): RenderContext
|
|
205
|
+
|
|
206
|
+
export interface RenderContext {
|
|
207
|
+
parentContext: RenderContext | null
|
|
208
|
+
|
|
209
|
+
element: AIElement<any>
|
|
210
|
+
|
|
211
|
+
renderId: string
|
|
212
|
+
|
|
213
|
+
logger: Logger
|
|
214
|
+
|
|
215
|
+
getContext<T>(context: Context<T>): T
|
|
216
|
+
|
|
217
|
+
render(renderable: Renderable): RenderResult
|
|
218
|
+
}
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
**args**
|
|
222
|
+
|
|
223
|
+
- `logger?: LogImplementation` - Defaults to `NoopLogImplementation`
|
|
224
|
+
- `rootRenderId?: string` - allows setting the root RenderContext's renderId, auto-generates a nanoid if left undefined
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
type Literal = string | number | null | undefined | boolean;
|
|
2
|
+
interface RenderableStream {
|
|
3
|
+
[Symbol.asyncIterator]: () => AsyncGenerator<string, void, unknown>;
|
|
4
|
+
}
|
|
5
|
+
interface RenderResult extends RenderableStream {
|
|
6
|
+
then: (onResolved: (value: string) => void, onRejected?: (reason?: any) => void) => void;
|
|
7
|
+
}
|
|
8
|
+
interface Context<T> {
|
|
9
|
+
Provider: AIComponent<{
|
|
10
|
+
children: AINode;
|
|
11
|
+
value: T;
|
|
12
|
+
}>;
|
|
13
|
+
defaultValue: T;
|
|
14
|
+
key: symbol;
|
|
15
|
+
}
|
|
16
|
+
type AIComponent<P> = (props: P, context: RenderContext) => Renderable;
|
|
17
|
+
declare const attachedContextSymbol: unique symbol;
|
|
18
|
+
interface AIElement<P> {
|
|
19
|
+
/** The tag associated with this {@link AIElement}. */
|
|
20
|
+
tag: AIComponent<P>;
|
|
21
|
+
/** The component properties. */
|
|
22
|
+
props: P;
|
|
23
|
+
/** A function that renders this {@link AIElement} to a {@link Renderable}. */
|
|
24
|
+
render: (ctx: RenderContext) => Renderable;
|
|
25
|
+
/** The {@link RenderContext} associated with this {@link AIElement}. */
|
|
26
|
+
[attachedContextSymbol]?: Record<symbol, any>;
|
|
27
|
+
}
|
|
28
|
+
type AINode = Literal | AIElement<any> | AINode[];
|
|
29
|
+
type Renderable = AINode | PromiseLike<Renderable> | RenderableStream;
|
|
30
|
+
type PropsOfAIComponent<T extends AIComponent<any>> = T extends AIComponent<infer P> ? P : never;
|
|
31
|
+
|
|
32
|
+
declare const LoggerContext: Context<LogImplementation>;
|
|
33
|
+
interface RenderContext {
|
|
34
|
+
parentContext: RenderContext | null;
|
|
35
|
+
element: AIElement<any>;
|
|
36
|
+
renderId: string;
|
|
37
|
+
logger: Logger;
|
|
38
|
+
getContext<T>(context: Context<T>): T;
|
|
39
|
+
render(renderable: Renderable): RenderResult;
|
|
40
|
+
}
|
|
41
|
+
declare function createContext<T>(defaultValue: T): Context<T>;
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* This can be extended using declare module to add additional providers.
|
|
45
|
+
*/
|
|
46
|
+
interface ChatCompletionRequestPayloads {
|
|
47
|
+
}
|
|
48
|
+
interface LogChatCompletionRequest<R extends Record<string, any> = ChatCompletionRequestPayloads[keyof ChatCompletionRequestPayloads]> {
|
|
49
|
+
startTime: number;
|
|
50
|
+
model: string;
|
|
51
|
+
providerRegion?: string;
|
|
52
|
+
provider?: string;
|
|
53
|
+
inputMessages: RenderedConversationMessage[];
|
|
54
|
+
request: R;
|
|
55
|
+
}
|
|
56
|
+
interface LogChatCompletionResponse<R extends Record<string, any> = ChatCompletionRequestPayloads[keyof ChatCompletionRequestPayloads]> extends LogChatCompletionRequest<R> {
|
|
57
|
+
latency: number;
|
|
58
|
+
outputMessage: RenderedConversationMessage;
|
|
59
|
+
finishReason: string;
|
|
60
|
+
tokensUsed: {
|
|
61
|
+
prompt: number;
|
|
62
|
+
completion: number;
|
|
63
|
+
total: number;
|
|
64
|
+
};
|
|
65
|
+
}
|
|
66
|
+
type LogLevel = 'error' | 'warn' | 'info' | 'debug';
|
|
67
|
+
type Loggable = string | number | boolean | undefined | null | object;
|
|
68
|
+
type Logger = {
|
|
69
|
+
error: (...msg: Loggable[]) => void;
|
|
70
|
+
warn: (...msg: Loggable[]) => void;
|
|
71
|
+
info: (...msg: Loggable[]) => void;
|
|
72
|
+
debug: (...msg: Loggable[]) => void;
|
|
73
|
+
logException: (exception: unknown) => void;
|
|
74
|
+
chatCompletionRequest: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
|
|
75
|
+
chatCompletionResponse: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
|
|
76
|
+
};
|
|
77
|
+
declare abstract class LogImplementation {
|
|
78
|
+
protected readonly loggedExceptions: WeakMap<object, boolean>;
|
|
79
|
+
/**
|
|
80
|
+
* @param ctx The current RenderContext
|
|
81
|
+
* @param level The log level, e.g. 'error', 'warn', 'info', 'debug'
|
|
82
|
+
* @param message
|
|
83
|
+
*/
|
|
84
|
+
abstract log(ctx: RenderContext, level: LogLevel, message: string): void;
|
|
85
|
+
/**
|
|
86
|
+
* Logs exceptions thrown during an element's render.
|
|
87
|
+
*/
|
|
88
|
+
logException(ctx: RenderContext, exception: unknown): void;
|
|
89
|
+
chatCompletionRequest<K extends keyof ChatCompletionRequestPayloads>(_ctx: RenderContext, _provider: K, _payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>): void;
|
|
90
|
+
chatCompletionResponse<K extends keyof ChatCompletionRequestPayloads>(_ctx: RenderContext, _provider: K, _payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>): void;
|
|
91
|
+
}
|
|
92
|
+
declare class BoundLogger implements Logger {
|
|
93
|
+
private readonly impl;
|
|
94
|
+
private readonly ctx;
|
|
95
|
+
constructor(impl: LogImplementation, ctx: RenderContext);
|
|
96
|
+
private formatMessage;
|
|
97
|
+
error: (...msgs: Loggable[]) => void;
|
|
98
|
+
warn: (...msgs: Loggable[]) => void;
|
|
99
|
+
info: (...msgs: Loggable[]) => void;
|
|
100
|
+
debug: (...msgs: Loggable[]) => void;
|
|
101
|
+
logException: (exception: unknown) => void;
|
|
102
|
+
chatCompletionRequest: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionRequest<ChatCompletionRequestPayloads[K]>) => void;
|
|
103
|
+
chatCompletionResponse: <K extends keyof ChatCompletionRequestPayloads>(provider: K, payload: LogChatCompletionResponse<ChatCompletionRequestPayloads[K]>) => void;
|
|
104
|
+
}
|
|
105
|
+
declare class NoopLogImplementation extends LogImplementation {
|
|
106
|
+
log(_ctx: RenderContext, _level: LogLevel, _message: string): void;
|
|
107
|
+
}
|
|
108
|
+
declare class ConsoleLogger extends LogImplementation {
|
|
109
|
+
log(ctx: RenderContext, level: LogLevel, message: string): void;
|
|
110
|
+
}
|
|
111
|
+
declare class CombinedLogger extends LogImplementation {
|
|
112
|
+
private readonly loggers;
|
|
113
|
+
constructor(loggers: LogImplementation[]);
|
|
114
|
+
log(...args: Parameters<LogImplementation['log']>): void;
|
|
115
|
+
chatCompletionRequest<K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionRequest']>): void;
|
|
116
|
+
chatCompletionResponse<K extends keyof ChatCompletionRequestPayloads>(...args: Parameters<LogImplementation['chatCompletionResponse']>): void;
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
type ChatCompletionRole = 'user' | 'system' | 'assistant';
|
|
120
|
+
declare const SystemMessage: (props: {
|
|
121
|
+
children: AINode;
|
|
122
|
+
}) => AINode;
|
|
123
|
+
declare const UserMessage: (props: {
|
|
124
|
+
children: AINode;
|
|
125
|
+
}) => AINode;
|
|
126
|
+
declare const AssistantMessage: (props: {
|
|
127
|
+
children: AINode;
|
|
128
|
+
}) => AINode;
|
|
129
|
+
interface ConversationMessageType<T extends ChatCompletionRole, C extends AIComponent<any>> {
|
|
130
|
+
type: T;
|
|
131
|
+
element: AIElement<PropsOfAIComponent<C>>;
|
|
132
|
+
}
|
|
133
|
+
type ConversationMessage = ConversationMessageType<'user', typeof UserMessage> | ConversationMessageType<'assistant', typeof AssistantMessage> | ConversationMessageType<'system', typeof SystemMessage>;
|
|
134
|
+
type RenderedConversationMessage = ConversationMessage & {
|
|
135
|
+
content: string;
|
|
136
|
+
tokens: number;
|
|
137
|
+
};
|
|
138
|
+
declare const childrenToConversationMessage: (c: AIElement<any> | AIElement<any>[]) => ConversationMessage[];
|
|
139
|
+
declare const computeUsage: (messages: RenderedConversationMessage[]) => {
|
|
140
|
+
prompt: number;
|
|
141
|
+
completion: number;
|
|
142
|
+
total: number;
|
|
143
|
+
};
|
|
144
|
+
declare class ChatCompletionError extends Error {
|
|
145
|
+
readonly chatCompletionRequest: LogChatCompletionRequest;
|
|
146
|
+
constructor(message: string, chatCompletionRequest: LogChatCompletionRequest);
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
declare function createAIElement<P extends {
|
|
150
|
+
children: C;
|
|
151
|
+
}, C>(tag: AIComponent<P>, props: Omit<P, 'children'> | null, ...children: [C]): AIElement<P>;
|
|
152
|
+
declare function createAIElement<P extends {
|
|
153
|
+
children: C[];
|
|
154
|
+
}, C>(tag: AIComponent<P>, props: Omit<P, 'children'> | null, ...children: C[]): AIElement<P>;
|
|
155
|
+
declare function AIFragment({ children }: {
|
|
156
|
+
children: AINode;
|
|
157
|
+
}): Renderable;
|
|
158
|
+
|
|
159
|
+
export { type AIElement as A, BoundLogger as B, type Context as C, LogImplementation as L, NoopLogImplementation as N, type PropsOfAIComponent as P, type RenderContext as R, SystemMessage as S, UserMessage as U, type RenderedConversationMessage as a, AIFragment as b, createAIElement as c, LoggerContext as d, createContext as e, AssistantMessage as f, type ConversationMessage as g, childrenToConversationMessage as h, computeUsage as i, ChatCompletionError as j, type ChatCompletionRequestPayloads as k, type LogChatCompletionRequest as l, type LogChatCompletionResponse as m, type LogLevel as n, type Logger as o, ConsoleLogger as p, CombinedLogger as q, type Literal as r, type RenderableStream as s, type RenderResult as t, type AIComponent as u, attachedContextSymbol as v, type AINode as w, type Renderable as x };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import { L as LogImplementation, R as RenderContext, C as Context, A as AIElement, a as RenderedConversationMessage } from './createElement-Q_LxUYf8.js';
|
|
2
|
+
export { u as AIComponent, b as AIFragment, w as AINode, f as AssistantMessage, B as BoundLogger, j as ChatCompletionError, k as ChatCompletionRequestPayloads, q as CombinedLogger, p as ConsoleLogger, g as ConversationMessage, r as Literal, l as LogChatCompletionRequest, m as LogChatCompletionResponse, n as LogLevel, o as Logger, d as LoggerContext, N as NoopLogImplementation, P as PropsOfAIComponent, t as RenderResult, x as Renderable, s as RenderableStream, S as SystemMessage, U as UserMessage, v as attachedContextSymbol, h as childrenToConversationMessage, i as computeUsage, c as createAIElement, e as createContext } from './createElement-Q_LxUYf8.js';
|
|
3
|
+
import { OpenAI } from 'openai';
|
|
4
|
+
export { OpenAI as OpenAIClient } from 'openai';
|
|
5
|
+
import AnthropicClient from '@anthropic-ai/sdk';
|
|
6
|
+
export { default as AnthropicClient } from '@anthropic-ai/sdk';
|
|
7
|
+
export { countTokens as countAnthropicTokens } from '@anthropic-ai/tokenizer';
|
|
8
|
+
|
|
9
|
+
declare function createRenderContext({ logger, rootRenderId, }?: {
|
|
10
|
+
logger?: LogImplementation;
|
|
11
|
+
rootRenderId?: string;
|
|
12
|
+
}): RenderContext;
|
|
13
|
+
|
|
14
|
+
type OpenAIChatCompletionRequest = OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming;
|
|
15
|
+
declare module '@gammatech/aijsx' {
|
|
16
|
+
interface ChatCompletionRequestPayloads {
|
|
17
|
+
openai: OpenAIChatCompletionRequest;
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
type ValidOpenAIChatModel = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-3.5-turbo-1106';
|
|
21
|
+
declare const OpenAIClientContext: Context<() => OpenAI>;
|
|
22
|
+
type OpenAIChatCompletionProps = {
|
|
23
|
+
model: ValidOpenAIChatModel;
|
|
24
|
+
maxTokens?: number;
|
|
25
|
+
temperature?: number;
|
|
26
|
+
children: AIElement<any> | AIElement<any>[];
|
|
27
|
+
provider?: string;
|
|
28
|
+
providerRegion?: string;
|
|
29
|
+
};
|
|
30
|
+
declare function OpenAIChatCompletion(props: OpenAIChatCompletionProps, { logger, render, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
|
|
31
|
+
|
|
32
|
+
declare const tokenizer: {
|
|
33
|
+
encode: (text: string) => number[];
|
|
34
|
+
decode: (tokens: number[]) => string;
|
|
35
|
+
};
|
|
36
|
+
declare function tokenLimitForChatModel(model: ValidOpenAIChatModel): number | undefined;
|
|
37
|
+
declare function tokenCountForConversationMessage(message: Pick<RenderedConversationMessage, 'type' | 'content'>): number;
|
|
38
|
+
|
|
39
|
+
type AnthropicChatCompletionRequest = AnthropicClient.CompletionCreateParams;
|
|
40
|
+
declare module '@gammatech/aijsx' {
|
|
41
|
+
interface ChatCompletionRequestPayloads {
|
|
42
|
+
anthropic: AnthropicChatCompletionRequest;
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
/**
|
|
46
|
+
* The set of valid Claude models.
|
|
47
|
+
* @see https://docs.anthropic.com/claude/reference/selecting-a-model
|
|
48
|
+
*/
|
|
49
|
+
type ValidAnthropicChatModel = 'claude-instant-1.2' | 'claude-2.1';
|
|
50
|
+
declare const AnthropicClientContext: Context<() => AnthropicClient>;
|
|
51
|
+
/**
|
|
52
|
+
* If you use an Anthropic model without specifying the max tokens for the completion, this value will be used as the default.
|
|
53
|
+
*/
|
|
54
|
+
declare const defaultMaxTokens = 4096;
|
|
55
|
+
type AnthropicChatCompletionProps = {
|
|
56
|
+
model: ValidAnthropicChatModel;
|
|
57
|
+
maxTokens?: number;
|
|
58
|
+
temperature?: number;
|
|
59
|
+
children: AIElement<any> | AIElement<any>[];
|
|
60
|
+
provider?: string;
|
|
61
|
+
providerRegion?: string;
|
|
62
|
+
};
|
|
63
|
+
/**
|
|
64
|
+
* An AI.JSX component that invokes an Anthropic Large Language Model.
|
|
65
|
+
* @param children The children to render.
|
|
66
|
+
* @param chatModel The chat model to use.
|
|
67
|
+
* @param completionModel The completion model to use.
|
|
68
|
+
* @param client The Anthropic client.
|
|
69
|
+
*/
|
|
70
|
+
declare function AnthropicChatCompletion(props: AnthropicChatCompletionProps, { render, logger, getContext }: RenderContext): AsyncGenerator<string, void, unknown>;
|
|
71
|
+
|
|
72
|
+
export { AIElement, AnthropicChatCompletion, type AnthropicChatCompletionRequest, AnthropicClientContext, Context, LogImplementation, OpenAIChatCompletion, type OpenAIChatCompletionRequest, OpenAIClientContext, RenderContext, RenderedConversationMessage, type ValidAnthropicChatModel, type ValidOpenAIChatModel, createRenderContext, defaultMaxTokens, tokenCountForConversationMessage, tokenLimitForChatModel, tokenizer };
|