@effect/ai 0.26.0 → 0.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Chat/package.json +6 -0
- package/EmbeddingModel/package.json +6 -0
- package/IdGenerator/package.json +6 -0
- package/LanguageModel/package.json +6 -0
- package/Model/package.json +6 -0
- package/Prompt/package.json +6 -0
- package/Response/package.json +6 -0
- package/Telemetry/package.json +6 -0
- package/Tool/package.json +6 -0
- package/Toolkit/package.json +6 -0
- package/dist/cjs/AiError.js +575 -11
- package/dist/cjs/AiError.js.map +1 -1
- package/dist/cjs/Chat.js +302 -0
- package/dist/cjs/Chat.js.map +1 -0
- package/dist/cjs/EmbeddingModel.js +184 -0
- package/dist/cjs/EmbeddingModel.js.map +1 -0
- package/dist/cjs/IdGenerator.js +255 -0
- package/dist/cjs/IdGenerator.js.map +1 -0
- package/dist/cjs/LanguageModel.js +584 -0
- package/dist/cjs/LanguageModel.js.map +1 -0
- package/dist/cjs/McpServer.js +12 -4
- package/dist/cjs/McpServer.js.map +1 -1
- package/dist/cjs/Model.js +118 -0
- package/dist/cjs/Model.js.map +1 -0
- package/dist/cjs/Prompt.js +649 -0
- package/dist/cjs/Prompt.js.map +1 -0
- package/dist/cjs/Response.js +635 -0
- package/dist/cjs/Response.js.map +1 -0
- package/dist/cjs/Telemetry.js +176 -0
- package/dist/cjs/Telemetry.js.map +1 -0
- package/dist/cjs/Tokenizer.js +87 -8
- package/dist/cjs/Tokenizer.js.map +1 -1
- package/dist/cjs/Tool.js +556 -0
- package/dist/cjs/Tool.js.map +1 -0
- package/dist/cjs/Toolkit.js +279 -0
- package/dist/cjs/Toolkit.js.map +1 -0
- package/dist/cjs/index.js +21 -19
- package/dist/dts/AiError.d.ts +577 -9
- package/dist/dts/AiError.d.ts.map +1 -1
- package/dist/dts/Chat.d.ts +356 -0
- package/dist/dts/Chat.d.ts.map +1 -0
- package/dist/dts/EmbeddingModel.d.ts +153 -0
- package/dist/dts/EmbeddingModel.d.ts.map +1 -0
- package/dist/dts/IdGenerator.d.ts +272 -0
- package/dist/dts/IdGenerator.d.ts.map +1 -0
- package/dist/dts/LanguageModel.d.ts +458 -0
- package/dist/dts/LanguageModel.d.ts.map +1 -0
- package/dist/dts/McpSchema.d.ts +25 -25
- package/dist/dts/McpServer.d.ts +6 -4
- package/dist/dts/McpServer.d.ts.map +1 -1
- package/dist/dts/Model.d.ts +124 -0
- package/dist/dts/Model.d.ts.map +1 -0
- package/dist/dts/Prompt.d.ts +1119 -0
- package/dist/dts/Prompt.d.ts.map +1 -0
- package/dist/dts/Response.d.ts +1519 -0
- package/dist/dts/Response.d.ts.map +1 -0
- package/dist/dts/Telemetry.d.ts +520 -0
- package/dist/dts/Telemetry.d.ts.map +1 -0
- package/dist/dts/Tokenizer.d.ts +131 -13
- package/dist/dts/Tokenizer.d.ts.map +1 -1
- package/dist/dts/Tool.d.ts +876 -0
- package/dist/dts/Tool.d.ts.map +1 -0
- package/dist/dts/Toolkit.d.ts +310 -0
- package/dist/dts/Toolkit.d.ts.map +1 -0
- package/dist/dts/index.d.ts +498 -13
- package/dist/dts/index.d.ts.map +1 -1
- package/dist/esm/AiError.js +570 -10
- package/dist/esm/AiError.js.map +1 -1
- package/dist/esm/Chat.js +291 -0
- package/dist/esm/Chat.js.map +1 -0
- package/dist/esm/EmbeddingModel.js +173 -0
- package/dist/esm/EmbeddingModel.js.map +1 -0
- package/dist/esm/IdGenerator.js +245 -0
- package/dist/esm/IdGenerator.js.map +1 -0
- package/dist/esm/LanguageModel.js +572 -0
- package/dist/esm/LanguageModel.js.map +1 -0
- package/dist/esm/McpServer.js +12 -4
- package/dist/esm/McpServer.js.map +1 -1
- package/dist/esm/Model.js +108 -0
- package/dist/esm/Model.js.map +1 -0
- package/dist/esm/Prompt.js +633 -0
- package/dist/esm/Prompt.js.map +1 -0
- package/dist/esm/Response.js +619 -0
- package/dist/esm/Response.js.map +1 -0
- package/dist/esm/Telemetry.js +166 -0
- package/dist/esm/Telemetry.js.map +1 -0
- package/dist/esm/Tokenizer.js +87 -8
- package/dist/esm/Tokenizer.js.map +1 -1
- package/dist/esm/Tool.js +534 -0
- package/dist/esm/Tool.js.map +1 -0
- package/dist/esm/Toolkit.js +269 -0
- package/dist/esm/Toolkit.js.map +1 -0
- package/dist/esm/index.js +498 -13
- package/dist/esm/index.js.map +1 -1
- package/package.json +76 -68
- package/src/AiError.ts +739 -9
- package/src/Chat.ts +546 -0
- package/src/EmbeddingModel.ts +311 -0
- package/src/IdGenerator.ts +320 -0
- package/src/LanguageModel.ts +1074 -0
- package/src/McpServer.ts +337 -194
- package/src/Model.ts +155 -0
- package/src/Prompt.ts +1616 -0
- package/src/Response.ts +2131 -0
- package/src/Telemetry.ts +655 -0
- package/src/Tokenizer.ts +145 -24
- package/src/Tool.ts +1267 -0
- package/src/Toolkit.ts +516 -0
- package/src/index.ts +499 -13
- package/AiChat/package.json +0 -6
- package/AiEmbeddingModel/package.json +0 -6
- package/AiInput/package.json +0 -6
- package/AiLanguageModel/package.json +0 -6
- package/AiModel/package.json +0 -6
- package/AiResponse/package.json +0 -6
- package/AiTelemetry/package.json +0 -6
- package/AiTool/package.json +0 -6
- package/AiToolkit/package.json +0 -6
- package/dist/cjs/AiChat.js +0 -122
- package/dist/cjs/AiChat.js.map +0 -1
- package/dist/cjs/AiEmbeddingModel.js +0 -109
- package/dist/cjs/AiEmbeddingModel.js.map +0 -1
- package/dist/cjs/AiInput.js +0 -458
- package/dist/cjs/AiInput.js.map +0 -1
- package/dist/cjs/AiLanguageModel.js +0 -351
- package/dist/cjs/AiLanguageModel.js.map +0 -1
- package/dist/cjs/AiModel.js +0 -37
- package/dist/cjs/AiModel.js.map +0 -1
- package/dist/cjs/AiResponse.js +0 -681
- package/dist/cjs/AiResponse.js.map +0 -1
- package/dist/cjs/AiTelemetry.js +0 -58
- package/dist/cjs/AiTelemetry.js.map +0 -1
- package/dist/cjs/AiTool.js +0 -150
- package/dist/cjs/AiTool.js.map +0 -1
- package/dist/cjs/AiToolkit.js +0 -157
- package/dist/cjs/AiToolkit.js.map +0 -1
- package/dist/cjs/internal/common.js +0 -21
- package/dist/cjs/internal/common.js.map +0 -1
- package/dist/dts/AiChat.d.ts +0 -101
- package/dist/dts/AiChat.d.ts.map +0 -1
- package/dist/dts/AiEmbeddingModel.d.ts +0 -65
- package/dist/dts/AiEmbeddingModel.d.ts.map +0 -1
- package/dist/dts/AiInput.d.ts +0 -590
- package/dist/dts/AiInput.d.ts.map +0 -1
- package/dist/dts/AiLanguageModel.d.ts +0 -302
- package/dist/dts/AiLanguageModel.d.ts.map +0 -1
- package/dist/dts/AiModel.d.ts +0 -25
- package/dist/dts/AiModel.d.ts.map +0 -1
- package/dist/dts/AiResponse.d.ts +0 -863
- package/dist/dts/AiResponse.d.ts.map +0 -1
- package/dist/dts/AiTelemetry.d.ts +0 -242
- package/dist/dts/AiTelemetry.d.ts.map +0 -1
- package/dist/dts/AiTool.d.ts +0 -334
- package/dist/dts/AiTool.d.ts.map +0 -1
- package/dist/dts/AiToolkit.d.ts +0 -96
- package/dist/dts/AiToolkit.d.ts.map +0 -1
- package/dist/dts/internal/common.d.ts +0 -2
- package/dist/dts/internal/common.d.ts.map +0 -1
- package/dist/esm/AiChat.js +0 -111
- package/dist/esm/AiChat.js.map +0 -1
- package/dist/esm/AiEmbeddingModel.js +0 -98
- package/dist/esm/AiEmbeddingModel.js.map +0 -1
- package/dist/esm/AiInput.js +0 -433
- package/dist/esm/AiInput.js.map +0 -1
- package/dist/esm/AiLanguageModel.js +0 -340
- package/dist/esm/AiLanguageModel.js.map +0 -1
- package/dist/esm/AiModel.js +0 -29
- package/dist/esm/AiModel.js.map +0 -1
- package/dist/esm/AiResponse.js +0 -657
- package/dist/esm/AiResponse.js.map +0 -1
- package/dist/esm/AiTelemetry.js +0 -48
- package/dist/esm/AiTelemetry.js.map +0 -1
- package/dist/esm/AiTool.js +0 -134
- package/dist/esm/AiTool.js.map +0 -1
- package/dist/esm/AiToolkit.js +0 -147
- package/dist/esm/AiToolkit.js.map +0 -1
- package/dist/esm/internal/common.js +0 -14
- package/dist/esm/internal/common.js.map +0 -1
- package/src/AiChat.ts +0 -251
- package/src/AiEmbeddingModel.ts +0 -169
- package/src/AiInput.ts +0 -602
- package/src/AiLanguageModel.ts +0 -685
- package/src/AiModel.ts +0 -53
- package/src/AiResponse.ts +0 -986
- package/src/AiTelemetry.ts +0 -333
- package/src/AiTool.ts +0 -579
- package/src/AiToolkit.ts +0 -265
- package/src/internal/common.ts +0 -12
package/src/AiLanguageModel.ts
DELETED
|
@@ -1,685 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* @since 1.0.0
|
|
3
|
-
*/
|
|
4
|
-
import * as _Context from "effect/Context"
|
|
5
|
-
import * as Effect from "effect/Effect"
|
|
6
|
-
import * as JsonSchema from "effect/JSONSchema"
|
|
7
|
-
import * as Option from "effect/Option"
|
|
8
|
-
import * as Predicate from "effect/Predicate"
|
|
9
|
-
import * as Random from "effect/Random"
|
|
10
|
-
import * as Schema from "effect/Schema"
|
|
11
|
-
import * as AST from "effect/SchemaAST"
|
|
12
|
-
import * as Stream from "effect/Stream"
|
|
13
|
-
import type { Span } from "effect/Tracer"
|
|
14
|
-
import type { Concurrency, Mutable, NoExcessProperties } from "effect/Types"
|
|
15
|
-
import { AiError } from "./AiError.js"
|
|
16
|
-
import * as AiInput from "./AiInput.js"
|
|
17
|
-
import * as AiResponse from "./AiResponse.js"
|
|
18
|
-
import { CurrentSpanTransformer } from "./AiTelemetry.js"
|
|
19
|
-
import type * as AiTool from "./AiTool.js"
|
|
20
|
-
import type * as AiToolkit from "./AiToolkit.js"
|
|
21
|
-
|
|
22
|
-
const constDisableValidation = { disableValidation: true }
|
|
23
|
-
|
|
24
|
-
/**
|
|
25
|
-
* @since 1.0.0
|
|
26
|
-
* @category Context
|
|
27
|
-
*/
|
|
28
|
-
export class AiLanguageModel extends _Context.Tag("@effect/ai/AiLanguageModel")<
|
|
29
|
-
AiLanguageModel,
|
|
30
|
-
AiLanguageModel.Service
|
|
31
|
-
>() {}
|
|
32
|
-
|
|
33
|
-
/**
|
|
34
|
-
* @since 1.0.0
|
|
35
|
-
* @category Models
|
|
36
|
-
*/
|
|
37
|
-
export type StructuredSchema<A, I extends Record<string, unknown>, R> =
|
|
38
|
-
| TaggedSchema<A, I, R>
|
|
39
|
-
| IdentifiedSchema<A, I, R>
|
|
40
|
-
|
|
41
|
-
/**
|
|
42
|
-
* @since 1.0.0
|
|
43
|
-
* @category Models
|
|
44
|
-
*/
|
|
45
|
-
export interface TaggedSchema<A, I, R> extends Schema.Schema<A, I, R> {
|
|
46
|
-
readonly _tag: string
|
|
47
|
-
}
|
|
48
|
-
|
|
49
|
-
/**
|
|
50
|
-
* @since 1.0.0
|
|
51
|
-
* @category Models
|
|
52
|
-
*/
|
|
53
|
-
export interface IdentifiedSchema<A, I, R> extends Schema.Schema<A, I, R> {
|
|
54
|
-
readonly identifier: string
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
/**
|
|
58
|
-
* The tool choice mode for the language model.
|
|
59
|
-
*
|
|
60
|
-
* - `auto` (default): The model can decide whether or not to call tools, as well as which tools to call.
|
|
61
|
-
* - `required`: The model **must** call a tool but can decide which tool will be called.
|
|
62
|
-
* - `none`: The model **must not** call a tool.
|
|
63
|
-
* - `{ tool: <tool_name> }`: The model must call the specified tool.
|
|
64
|
-
*
|
|
65
|
-
* @since 1.0.0
|
|
66
|
-
* @category Models
|
|
67
|
-
*/
|
|
68
|
-
export type ToolChoice<Tool extends AiTool.Any> = "auto" | "none" | "required" | {
|
|
69
|
-
readonly tool: Tool["name"]
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
/**
|
|
73
|
-
* @since 1.0.0
|
|
74
|
-
* @category Models
|
|
75
|
-
*/
|
|
76
|
-
export interface ToolCallIdGenerator {
|
|
77
|
-
generateId(): Effect.Effect<string>
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
/**
|
|
81
|
-
* The default services available for use when constructing an `AiLanguageModel`.
|
|
82
|
-
*
|
|
83
|
-
* @since 1.0.0
|
|
84
|
-
* @category Context
|
|
85
|
-
*/
|
|
86
|
-
export type Context = CurrentToolCallIdGenerator
|
|
87
|
-
|
|
88
|
-
const ALPHANUMS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
|
|
89
|
-
const DefaultToolCallIdGenerator: ToolCallIdGenerator = {
|
|
90
|
-
generateId: Effect.fnUntraced(function*() {
|
|
91
|
-
const chars = new Array(32)
|
|
92
|
-
for (let i = 0; i < 32; i++) {
|
|
93
|
-
chars[i] = ALPHANUMS[yield* Random.nextIntBetween(0, ALPHANUMS.length - 1)]
|
|
94
|
-
}
|
|
95
|
-
return `tool_${chars.join("")}`
|
|
96
|
-
})
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
/**
|
|
100
|
-
* @since 1.0.0
|
|
101
|
-
* @category Context
|
|
102
|
-
*/
|
|
103
|
-
export class CurrentToolCallIdGenerator extends _Context.Tag("@effect/ai/CurrentToolCallIdGenerator")<
|
|
104
|
-
CurrentToolCallIdGenerator,
|
|
105
|
-
ToolCallIdGenerator
|
|
106
|
-
>() {}
|
|
107
|
-
|
|
108
|
-
/**
|
|
109
|
-
* Options for generating text using a large language model.
|
|
110
|
-
*
|
|
111
|
-
* @since 1.0.0
|
|
112
|
-
* @category Models
|
|
113
|
-
*/
|
|
114
|
-
export interface GenerateTextOptions<Tools extends AiTool.Any> {
|
|
115
|
-
/**
|
|
116
|
-
* The prompt input to use to generate text.
|
|
117
|
-
*/
|
|
118
|
-
readonly prompt: AiInput.Raw
|
|
119
|
-
|
|
120
|
-
/**
|
|
121
|
-
* An optional system message that will be part of the prompt.
|
|
122
|
-
*/
|
|
123
|
-
readonly system?: string | undefined
|
|
124
|
-
|
|
125
|
-
/**
|
|
126
|
-
* A toolkit containing both the tools and the tool call handler to use to
|
|
127
|
-
* augment text generation.
|
|
128
|
-
*/
|
|
129
|
-
readonly toolkit?: AiToolkit.ToHandler<Tools> | Effect.Effect<AiToolkit.ToHandler<Tools>, any, any>
|
|
130
|
-
|
|
131
|
-
/**
|
|
132
|
-
* The tool choice mode for the language model.
|
|
133
|
-
*
|
|
134
|
-
* - `auto` (default): The model can decide whether or not to call tools, as well as which tools to call.
|
|
135
|
-
* - `required`: The model **must** call a tool but can decide which tool will be called.
|
|
136
|
-
* - `none`: The model **must not** call a tool.
|
|
137
|
-
* - `{ tool: <tool_name> }`: The model must call the specified tool.
|
|
138
|
-
*/
|
|
139
|
-
readonly toolChoice?: ToolChoice<Tools>
|
|
140
|
-
|
|
141
|
-
/**
|
|
142
|
-
* The concurrency level for resolving tool calls.
|
|
143
|
-
*/
|
|
144
|
-
readonly concurrency?: Concurrency | undefined
|
|
145
|
-
|
|
146
|
-
/**
|
|
147
|
-
* When set to `true`, tool calls requested by the large language model
|
|
148
|
-
* will not be auto-resolved by the framework.
|
|
149
|
-
*
|
|
150
|
-
* This option is useful when:
|
|
151
|
-
* 1. The user wants to include tool call definitions from an `AiToolkit`
|
|
152
|
-
* in requests to the large language model so that the model has the
|
|
153
|
-
* capability to call tools.
|
|
154
|
-
* 2. The user wants to control the execution of tool call resolvers
|
|
155
|
-
* instead of having the framework handle tool call resolution.
|
|
156
|
-
*/
|
|
157
|
-
readonly disableToolCallResolution?: boolean | undefined
|
|
158
|
-
}
|
|
159
|
-
|
|
160
|
-
/**
|
|
161
|
-
* Options for generating a structured object using a large language model.
|
|
162
|
-
*
|
|
163
|
-
* @since 1.0.0
|
|
164
|
-
* @category Models
|
|
165
|
-
*/
|
|
166
|
-
export interface GenerateObjectOptions<A, I extends Record<string, unknown>, R> {
|
|
167
|
-
/**
|
|
168
|
-
* The prompt input to use to generate text.
|
|
169
|
-
*/
|
|
170
|
-
readonly prompt: AiInput.Raw
|
|
171
|
-
|
|
172
|
-
/**
|
|
173
|
-
* An optional system message that will be part of the prompt.
|
|
174
|
-
*/
|
|
175
|
-
readonly system?: string | undefined
|
|
176
|
-
|
|
177
|
-
/**
|
|
178
|
-
* The schema to be used to specify the structure of the object to generate.
|
|
179
|
-
*/
|
|
180
|
-
readonly schema: Schema.Schema<A, I, R>
|
|
181
|
-
|
|
182
|
-
/**
|
|
183
|
-
* The identifier to use to associating the underlying tool call with the
|
|
184
|
-
* generated output.
|
|
185
|
-
*/
|
|
186
|
-
readonly toolCallId?: string | undefined
|
|
187
|
-
}
|
|
188
|
-
|
|
189
|
-
/**
|
|
190
|
-
* A utility type to extract the success type for the text generation methods
|
|
191
|
-
* of `AiLanguageModel` from the provided options.
|
|
192
|
-
*
|
|
193
|
-
* @since 1.0.0
|
|
194
|
-
* @category Utility Types
|
|
195
|
-
*/
|
|
196
|
-
export type ExtractSuccess<Options> = Options extends {
|
|
197
|
-
disableToolCallResolution: true
|
|
198
|
-
} ? AiResponse.AiResponse
|
|
199
|
-
: Options extends {
|
|
200
|
-
toolkit: AiToolkit.ToHandler<infer _Tools>
|
|
201
|
-
} ? AiResponse.WithToolCallResults<_Tools>
|
|
202
|
-
: Options extends {
|
|
203
|
-
toolkit: Effect.Effect<AiToolkit.ToHandler<infer _Tools>, infer _E, infer _R>
|
|
204
|
-
} ? AiResponse.WithToolCallResults<_Tools>
|
|
205
|
-
: AiResponse.AiResponse
|
|
206
|
-
|
|
207
|
-
/**
|
|
208
|
-
* A utility type to extract the error type for the text generation methods
|
|
209
|
-
* of `AiLanguageModel` from the provided options.
|
|
210
|
-
*
|
|
211
|
-
* @since 1.0.0
|
|
212
|
-
* @category Utility Types
|
|
213
|
-
*/
|
|
214
|
-
export type ExtractError<Options> = Options extends {
|
|
215
|
-
disableToolCallResolution: true
|
|
216
|
-
} ? AiError
|
|
217
|
-
: Options extends {
|
|
218
|
-
toolkit: AiToolkit.ToHandler<infer _Tools>
|
|
219
|
-
} ? AiError | AiTool.Failure<_Tools>
|
|
220
|
-
: Options extends {
|
|
221
|
-
toolkit: Effect.Effect<AiToolkit.ToHandler<infer _Tools>, infer _E, infer _R>
|
|
222
|
-
} ? AiError | AiTool.Failure<_Tools> | _E
|
|
223
|
-
: AiError
|
|
224
|
-
|
|
225
|
-
/**
|
|
226
|
-
* A utility type to extract the context type for the text generation methods
|
|
227
|
-
* of `AiLanguageModel` from the provided options.
|
|
228
|
-
*
|
|
229
|
-
* @since 1.0.0
|
|
230
|
-
* @category Utility Types
|
|
231
|
-
*/
|
|
232
|
-
export type ExtractContext<Options> = Options extends {
|
|
233
|
-
disableToolCallResolution: true
|
|
234
|
-
} ? never
|
|
235
|
-
: Options extends {
|
|
236
|
-
toolkit: AiToolkit.ToHandler<infer _Tools>
|
|
237
|
-
} ? AiTool.Context<_Tools>
|
|
238
|
-
: Options extends {
|
|
239
|
-
toolkit: Effect.Effect<AiToolkit.ToHandler<infer _Tools>, infer _E, infer _R>
|
|
240
|
-
} ? AiTool.Context<_Tools> | _R
|
|
241
|
-
: never
|
|
242
|
-
|
|
243
|
-
/**
|
|
244
|
-
* @since 1.0.0
|
|
245
|
-
* @category Models
|
|
246
|
-
*/
|
|
247
|
-
export declare namespace AiLanguageModel {
|
|
248
|
-
/**
|
|
249
|
-
* @since 1.0.0
|
|
250
|
-
* @category Models
|
|
251
|
-
*/
|
|
252
|
-
export interface Service {
|
|
253
|
-
/**
|
|
254
|
-
* Generate text using a large language model for the specified `prompt`.
|
|
255
|
-
*
|
|
256
|
-
* If a `toolkit` is specified, the large language model will additionally
|
|
257
|
-
* be able to perform tool calls to augment its response.
|
|
258
|
-
*/
|
|
259
|
-
readonly generateText: <
|
|
260
|
-
Tools extends AiTool.Any,
|
|
261
|
-
Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
|
|
262
|
-
>(
|
|
263
|
-
options: Options & GenerateTextOptions<Tools>
|
|
264
|
-
) => Effect.Effect<
|
|
265
|
-
ExtractSuccess<Options>,
|
|
266
|
-
ExtractError<Options>,
|
|
267
|
-
ExtractContext<Options>
|
|
268
|
-
>
|
|
269
|
-
/**
|
|
270
|
-
* Generate text using a large language model for the specified `prompt`,
|
|
271
|
-
* streaming output from the model as soon as it is available.
|
|
272
|
-
*
|
|
273
|
-
* If a `toolkit` is specified, the large language model will additionally
|
|
274
|
-
* be able to perform tool calls to augment its response.
|
|
275
|
-
*/
|
|
276
|
-
readonly streamText: <
|
|
277
|
-
Tools extends AiTool.Any,
|
|
278
|
-
Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
|
|
279
|
-
>(
|
|
280
|
-
options: Options & GenerateTextOptions<Tools>
|
|
281
|
-
) => Stream.Stream<
|
|
282
|
-
ExtractSuccess<Options>,
|
|
283
|
-
ExtractError<Options>,
|
|
284
|
-
ExtractContext<Options>
|
|
285
|
-
>
|
|
286
|
-
|
|
287
|
-
/**
|
|
288
|
-
* Generate a structured object for the specified prompt and schema using a
|
|
289
|
-
* large language model.
|
|
290
|
-
*/
|
|
291
|
-
readonly generateObject: <A, I extends Record<string, unknown>, R>(
|
|
292
|
-
options: GenerateObjectOptions<A, I, R>
|
|
293
|
-
) => Effect.Effect<AiResponse.WithStructuredOutput<A>, AiError, R>
|
|
294
|
-
}
|
|
295
|
-
}
|
|
296
|
-
|
|
297
|
-
const constEmptyMap = new Map<never, never>()
|
|
298
|
-
|
|
299
|
-
/**
|
|
300
|
-
* @since 1.0.0
|
|
301
|
-
* @category Models
|
|
302
|
-
*/
|
|
303
|
-
export interface AiLanguageModelOptions {
|
|
304
|
-
/**
|
|
305
|
-
* The prompt messages to use to generate text.
|
|
306
|
-
*/
|
|
307
|
-
readonly prompt: AiInput.AiInput
|
|
308
|
-
/**
|
|
309
|
-
* An optional system message that will be part of the prompt.
|
|
310
|
-
*/
|
|
311
|
-
readonly system: Option.Option<string>
|
|
312
|
-
/**
|
|
313
|
-
* The tools to use to generate text in an encoded format suitable for
|
|
314
|
-
* incorporation into requests to the large language model.
|
|
315
|
-
*/
|
|
316
|
-
readonly tools: Array<{
|
|
317
|
-
readonly name: string
|
|
318
|
-
readonly description: string
|
|
319
|
-
readonly parameters: JsonSchema.JsonSchema7
|
|
320
|
-
readonly structured: boolean
|
|
321
|
-
}>
|
|
322
|
-
/**
|
|
323
|
-
* The tool choice mode for the language model.
|
|
324
|
-
*
|
|
325
|
-
* - `auto` (default): The model can decide whether or not to call tools, as well as which tools to call.
|
|
326
|
-
* - `required`: The model **must** call a tool but can decide which tool will be called.
|
|
327
|
-
* - `none`: The model **must not** call a tool.
|
|
328
|
-
* - `{ tool: <tool_name> }`: The model must call the specified tool.
|
|
329
|
-
*/
|
|
330
|
-
readonly toolChoice: ToolChoice<any>
|
|
331
|
-
/**
|
|
332
|
-
* The span to use to trace interactions with the large language model.
|
|
333
|
-
*/
|
|
334
|
-
readonly span: Span
|
|
335
|
-
}
|
|
336
|
-
|
|
337
|
-
/**
|
|
338
|
-
* @since 1.0.0
|
|
339
|
-
* @category Constructors
|
|
340
|
-
*/
|
|
341
|
-
export const make: (
|
|
342
|
-
opts: {
|
|
343
|
-
readonly generateText: (
|
|
344
|
-
options: AiLanguageModelOptions
|
|
345
|
-
) => Effect.Effect<AiResponse.AiResponse, AiError, Context>
|
|
346
|
-
readonly streamText: (
|
|
347
|
-
options: AiLanguageModelOptions
|
|
348
|
-
) => Stream.Stream<AiResponse.AiResponse, AiError, Context>
|
|
349
|
-
}
|
|
350
|
-
) => Effect.Effect<
|
|
351
|
-
AiLanguageModel.Service
|
|
352
|
-
> = Effect.fnUntraced(function*(opts: {
|
|
353
|
-
readonly generateText: (
|
|
354
|
-
options: AiLanguageModelOptions
|
|
355
|
-
) => Effect.Effect<AiResponse.AiResponse, AiError, Context>
|
|
356
|
-
readonly streamText: (
|
|
357
|
-
options: AiLanguageModelOptions
|
|
358
|
-
) => Stream.Stream<AiResponse.AiResponse, AiError, Context>
|
|
359
|
-
}) {
|
|
360
|
-
const parentSpanTransformer = yield* Effect.serviceOption(CurrentSpanTransformer)
|
|
361
|
-
const getSpanTransformer = Effect.serviceOption(CurrentSpanTransformer).pipe(
|
|
362
|
-
Effect.map(Option.orElse(() => parentSpanTransformer))
|
|
363
|
-
)
|
|
364
|
-
|
|
365
|
-
const toolCallIdGenerator = yield* Effect.serviceOption(CurrentToolCallIdGenerator).pipe(
|
|
366
|
-
Effect.map(Option.getOrElse(() => DefaultToolCallIdGenerator))
|
|
367
|
-
)
|
|
368
|
-
|
|
369
|
-
const generateText = <
|
|
370
|
-
Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
|
|
371
|
-
>({ concurrency, toolChoice = "auto", toolkit, ...options }: Options): Effect.Effect<
|
|
372
|
-
ExtractSuccess<Options>,
|
|
373
|
-
ExtractError<Options>,
|
|
374
|
-
ExtractContext<Options>
|
|
375
|
-
> =>
|
|
376
|
-
Effect.useSpan(
|
|
377
|
-
"AiLanguageModel.generateText",
|
|
378
|
-
{ captureStackTrace: false, attributes: { concurrency, toolChoice } },
|
|
379
|
-
Effect.fnUntraced(
|
|
380
|
-
function*(span) {
|
|
381
|
-
const prompt = AiInput.make(options.prompt)
|
|
382
|
-
const system = Option.fromNullable(options.system)
|
|
383
|
-
const spanTransformer = yield* getSpanTransformer
|
|
384
|
-
const modelOptions: Mutable<AiLanguageModelOptions> = { prompt, system, tools: [], toolChoice: "none", span }
|
|
385
|
-
if (Predicate.isUndefined(toolkit)) {
|
|
386
|
-
const response = yield* opts.generateText(modelOptions)
|
|
387
|
-
if (Option.isSome(spanTransformer)) {
|
|
388
|
-
spanTransformer.value({ ...modelOptions, response })
|
|
389
|
-
}
|
|
390
|
-
return response
|
|
391
|
-
}
|
|
392
|
-
modelOptions.toolChoice = toolChoice
|
|
393
|
-
const actualToolkit = Effect.isEffect(toolkit) ? yield* toolkit : toolkit
|
|
394
|
-
for (const tool of actualToolkit.tools) {
|
|
395
|
-
modelOptions.tools.push(convertTool(tool))
|
|
396
|
-
}
|
|
397
|
-
const response = yield* opts.generateText(modelOptions)
|
|
398
|
-
if (Option.isSome(spanTransformer)) {
|
|
399
|
-
spanTransformer.value({ ...modelOptions, response })
|
|
400
|
-
}
|
|
401
|
-
if (options.disableToolCallResolution) {
|
|
402
|
-
return response
|
|
403
|
-
}
|
|
404
|
-
return yield* resolveParts({ response, toolkit: actualToolkit, concurrency, method: "generateText" })
|
|
405
|
-
},
|
|
406
|
-
(effect, span) => Effect.withParentSpan(effect, span),
|
|
407
|
-
Effect.provideService(CurrentToolCallIdGenerator, toolCallIdGenerator)
|
|
408
|
-
)
|
|
409
|
-
) as any
|
|
410
|
-
|
|
411
|
-
const streamText = Effect.fnUntraced(
|
|
412
|
-
function*<
|
|
413
|
-
Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
|
|
414
|
-
>({ concurrency, toolChoice = "auto", toolkit, ...options }: Options) {
|
|
415
|
-
const span = yield* Effect.makeSpanScoped("AiLanguageModel.streamText", {
|
|
416
|
-
captureStackTrace: false,
|
|
417
|
-
attributes: { concurrency, toolChoice }
|
|
418
|
-
})
|
|
419
|
-
const prompt = AiInput.make(options.prompt)
|
|
420
|
-
const system = Option.fromNullable(options.system)
|
|
421
|
-
const modelOptions: Mutable<AiLanguageModelOptions> = { prompt, system, tools: [], toolChoice: "none", span }
|
|
422
|
-
if (Predicate.isUndefined(toolkit)) {
|
|
423
|
-
return [opts.streamText(modelOptions), modelOptions] as const
|
|
424
|
-
}
|
|
425
|
-
modelOptions.toolChoice = toolChoice
|
|
426
|
-
const actualToolkit = Effect.isEffect(toolkit)
|
|
427
|
-
? yield* (toolkit as Effect.Effect<AiToolkit.ToHandler<any>>)
|
|
428
|
-
: toolkit
|
|
429
|
-
for (const tool of actualToolkit.tools) {
|
|
430
|
-
modelOptions.tools.push(convertTool(tool))
|
|
431
|
-
}
|
|
432
|
-
const stream = opts.streamText(modelOptions)
|
|
433
|
-
if (options.disableToolCallResolution) {
|
|
434
|
-
return [stream, modelOptions] as const
|
|
435
|
-
}
|
|
436
|
-
return [
|
|
437
|
-
stream.pipe(
|
|
438
|
-
Stream.mapEffect(
|
|
439
|
-
(response) => resolveParts({ response, toolkit: actualToolkit, concurrency, method: "streamText" }),
|
|
440
|
-
{ concurrency: "unbounded" }
|
|
441
|
-
)
|
|
442
|
-
) as Stream.Stream<AiResponse.AiResponse, AiError, Context>,
|
|
443
|
-
modelOptions
|
|
444
|
-
] as const
|
|
445
|
-
},
|
|
446
|
-
Effect.flatMap(Effect.fnUntraced(function*([stream, options]) {
|
|
447
|
-
const spanTransformer = yield* getSpanTransformer
|
|
448
|
-
if (Option.isNone(spanTransformer)) {
|
|
449
|
-
return stream
|
|
450
|
-
}
|
|
451
|
-
let finalResponse = AiResponse.empty
|
|
452
|
-
return stream.pipe(
|
|
453
|
-
Stream.map((response) => {
|
|
454
|
-
finalResponse = AiResponse.merge(finalResponse, response)
|
|
455
|
-
return response
|
|
456
|
-
}),
|
|
457
|
-
Stream.ensuring(Effect.sync(() => {
|
|
458
|
-
spanTransformer.value({ ...options, response: finalResponse })
|
|
459
|
-
}))
|
|
460
|
-
)
|
|
461
|
-
})),
|
|
462
|
-
Stream.unwrapScoped,
|
|
463
|
-
Stream.provideService(CurrentToolCallIdGenerator, toolCallIdGenerator)
|
|
464
|
-
)
|
|
465
|
-
|
|
466
|
-
const generateObject = <A, I extends Record<string, unknown>, R>(
|
|
467
|
-
options: GenerateObjectOptions<A, I, R>
|
|
468
|
-
): Effect.Effect<AiResponse.WithStructuredOutput<A>, AiError, R> => {
|
|
469
|
-
const toolCallId: string = options.toolCallId
|
|
470
|
-
? options.toolCallId
|
|
471
|
-
: "_tag" in options.schema
|
|
472
|
-
? options.schema._tag as string
|
|
473
|
-
: "identifier" in options.schema
|
|
474
|
-
? options.schema.identifier as string
|
|
475
|
-
: "generateObject"
|
|
476
|
-
return Effect.useSpan(
|
|
477
|
-
"AiLanguageModel.generateObject",
|
|
478
|
-
{
|
|
479
|
-
captureStackTrace: false,
|
|
480
|
-
attributes: { toolCallId }
|
|
481
|
-
},
|
|
482
|
-
Effect.fnUntraced(
|
|
483
|
-
function*(span) {
|
|
484
|
-
const prompt = AiInput.make(options.prompt)
|
|
485
|
-
const system = Option.fromNullable(options.system)
|
|
486
|
-
const spanTransformer = yield* getSpanTransformer
|
|
487
|
-
const decode = Schema.decodeUnknown(options.schema)
|
|
488
|
-
const tool = convertStructured(toolCallId, options.schema)
|
|
489
|
-
const toolChoice = { tool: tool.name } as const
|
|
490
|
-
const modelOptions: AiLanguageModelOptions = { prompt, system, tools: [tool], toolChoice, span }
|
|
491
|
-
const response = yield* opts.generateText(modelOptions)
|
|
492
|
-
if (Option.isSome(spanTransformer)) {
|
|
493
|
-
spanTransformer.value({ ...modelOptions, response })
|
|
494
|
-
}
|
|
495
|
-
const toolCallPart = response.parts.find((part): part is AiResponse.ToolCallPart =>
|
|
496
|
-
part._tag === "ToolCallPart" && part.name === toolCallId
|
|
497
|
-
)
|
|
498
|
-
if (Predicate.isUndefined(toolCallPart)) {
|
|
499
|
-
return yield* new AiError({
|
|
500
|
-
module: "AiLanguageModel",
|
|
501
|
-
method: "generateObject",
|
|
502
|
-
description: `Tool call '${toolCallId}' not found in model response`
|
|
503
|
-
})
|
|
504
|
-
}
|
|
505
|
-
return yield* Effect.matchEffect(decode(toolCallPart.params), {
|
|
506
|
-
onFailure: (cause) =>
|
|
507
|
-
new AiError({
|
|
508
|
-
module: "AiLanguageModel",
|
|
509
|
-
method: "generateObject",
|
|
510
|
-
description: `Failed to decode tool call '${toolCallId}' parameters`,
|
|
511
|
-
cause
|
|
512
|
-
}),
|
|
513
|
-
onSuccess: (output) =>
|
|
514
|
-
Effect.succeed(
|
|
515
|
-
new AiResponse.WithStructuredOutput({
|
|
516
|
-
parts: response.parts,
|
|
517
|
-
id: toolCallPart.id,
|
|
518
|
-
name: toolCallPart.name,
|
|
519
|
-
value: output
|
|
520
|
-
}, constDisableValidation)
|
|
521
|
-
)
|
|
522
|
-
})
|
|
523
|
-
},
|
|
524
|
-
(effect, span) => Effect.withParentSpan(effect, span),
|
|
525
|
-
Effect.provideService(CurrentToolCallIdGenerator, toolCallIdGenerator)
|
|
526
|
-
)
|
|
527
|
-
)
|
|
528
|
-
}
|
|
529
|
-
|
|
530
|
-
return AiLanguageModel.of({ generateText, streamText, generateObject } as any)
|
|
531
|
-
})
|
|
532
|
-
|
|
533
|
-
const convertTool = <Tool extends AiTool.Any>(tool: Tool) => ({
|
|
534
|
-
name: tool.name,
|
|
535
|
-
description: tool.description ?? getDescription(tool.parametersSchema.ast),
|
|
536
|
-
parameters: makeJsonSchema(tool.parametersSchema.ast),
|
|
537
|
-
structured: false
|
|
538
|
-
})
|
|
539
|
-
|
|
540
|
-
const convertStructured = <A, I, R>(name: string, schema: Schema.Schema<A, I, R>) => ({
|
|
541
|
-
name,
|
|
542
|
-
description: getDescription(schema.ast),
|
|
543
|
-
parameters: makeJsonSchema(schema.ast),
|
|
544
|
-
structured: true
|
|
545
|
-
})
|
|
546
|
-
|
|
547
|
-
const makeJsonSchema = (ast: AST.AST): JsonSchema.JsonSchema7 => {
|
|
548
|
-
const props = AST.getPropertySignatures(ast)
|
|
549
|
-
if (props.length === 0) {
|
|
550
|
-
return {
|
|
551
|
-
type: "object",
|
|
552
|
-
properties: {},
|
|
553
|
-
required: [],
|
|
554
|
-
additionalProperties: false
|
|
555
|
-
}
|
|
556
|
-
}
|
|
557
|
-
const $defs = {}
|
|
558
|
-
const schema = JsonSchema.fromAST(ast, {
|
|
559
|
-
definitions: $defs,
|
|
560
|
-
topLevelReferenceStrategy: "skip"
|
|
561
|
-
})
|
|
562
|
-
if (Object.keys($defs).length === 0) return schema
|
|
563
|
-
;(schema as any).$defs = $defs
|
|
564
|
-
return schema
|
|
565
|
-
}
|
|
566
|
-
|
|
567
|
-
const getDescription = (ast: AST.AST): string => {
|
|
568
|
-
const annotations = ast._tag === "Transformation" ?
|
|
569
|
-
{
|
|
570
|
-
...ast.to.annotations,
|
|
571
|
-
...ast.annotations
|
|
572
|
-
} :
|
|
573
|
-
ast.annotations
|
|
574
|
-
return AST.DescriptionAnnotationId in annotations ? annotations[AST.DescriptionAnnotationId] as string : ""
|
|
575
|
-
}
|
|
576
|
-
|
|
577
|
-
/**
 * Resolves every tool-call part contained in a model response by invoking
 * the corresponding handler from the supplied toolkit, then wraps the
 * response parts together with the decoded and encoded tool-call results.
 */
const resolveParts = Effect.fnUntraced(function*<Tools extends AiTool.Any>(options: {
  readonly response: AiResponse.AiResponse
  readonly toolkit: AiToolkit.ToHandler<Tools>
  readonly concurrency: Concurrency | undefined
  readonly method: string
}) {
  // Single pass over the parts: the filter predicate both narrows to
  // ToolCallPart and records the tool name as a side effect.
  const toolNames: Array<string> = []
  const toolParts = options.response.parts.filter(
    (part): part is AiResponse.ToolCallPart => {
      if (part._tag === "ToolCallPart") {
        toolNames.push(part.name)
        return true
      }
      return false
    }
  )
  // No tool calls were made: return the original parts with empty result maps.
  if (toolParts.length === 0) {
    return new AiResponse.WithToolCallResults({
      parts: options.response.parts,
      results: constEmptyMap,
      encodedResults: constEmptyMap
    }, constDisableValidation)
  }
  yield* Effect.annotateCurrentSpan("toolCalls", toolNames)
  // Decoded handler results, keyed by tool-call id.
  const results = new Map<AiResponse.ToolCallId, {
    readonly name: string
    readonly result: AiTool.Success<Tools>
  }>()
  // Encoded (wire-format) handler results, keyed by tool-call id.
  const encodedResults = new Map<AiResponse.ToolCallId, {
    readonly name: string
    readonly result: unknown
  }>()
  // Run every handler, honoring the caller-provided concurrency, and record
  // both result representations as each call completes. `discard: true`
  // because the maps above capture the outcomes.
  const resolve = Effect.forEach(toolParts, (part) => {
    const id = part.id as AiResponse.ToolCallId
    const name = part.name as AiTool.Name<Tools>
    const params = part.params as AiTool.Parameters<Tools>
    const toolCall = options.toolkit.handle(name, params)
    return Effect.map(toolCall, ({ encodedResult, result }) => {
      results.set(id, { name, result })
      encodedResults.set(id, { name, result: encodedResult })
    })
  }, { concurrency: options.concurrency, discard: true })
  yield* resolve
  return new AiResponse.WithToolCallResults({
    parts: options.response.parts,
    results,
    encodedResults
  }, constDisableValidation)
})
/**
 * Generate text using a large language model for the specified `prompt`.
 *
 * If a `toolkit` is specified, the large language model will additionally
 * be able to perform tool calls to augment its response.
 *
 * Requires an `AiLanguageModel` to be present in the environment; the call
 * is delegated to that service's `generateText` method.
 *
 * @since 1.0.0
 * @category Functions
 */
export const generateText: <
  Tools extends AiTool.Any,
  Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
>(
  options: Options & GenerateTextOptions<Tools>
) => Effect.Effect<
  ExtractSuccess<Options>,
  ExtractError<Options>,
  AiLanguageModel | ExtractContext<Options>
> = Effect.serviceFunctionEffect(AiLanguageModel, (_) => _.generateText)
/**
 * Generate a structured object for the specified prompt and schema using a
 * large language model.
 *
 * When using a `Schema` that does not have an `identifier` or `_tag`
 * property, you must specify a `toolCallId` to properly associate the
 * output of the model.
 *
 * Requires an `AiLanguageModel` to be present in the environment; the call
 * is delegated to that service's `generateObject` method.
 *
 * @since 1.0.0
 * @category Functions
 */
export const generateObject: <A, I extends Record<string, unknown>, R>(
  options: GenerateObjectOptions<A, I, R>
) => Effect.Effect<
  AiResponse.WithStructuredOutput<A>,
  AiError,
  AiLanguageModel | R
> = Effect.serviceFunctionEffect(AiLanguageModel, (_) => _.generateObject)
/**
|
|
667
|
-
* Generate text using a large language model for the specified `prompt`,
|
|
668
|
-
* streaming output from the model as soon as it is available.
|
|
669
|
-
*
|
|
670
|
-
* If a `toolkit` is specified, the large language model will additionally
|
|
671
|
-
* be able to perform tool calls to augment its response.
|
|
672
|
-
*
|
|
673
|
-
* @since 1.0.0
|
|
674
|
-
* @category Functions
|
|
675
|
-
*/
|
|
676
|
-
export const streamText = <
|
|
677
|
-
Tools extends AiTool.Any,
|
|
678
|
-
Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
|
|
679
|
-
>(
|
|
680
|
-
options: Options & GenerateTextOptions<Tools>
|
|
681
|
-
): Stream.Stream<
|
|
682
|
-
ExtractSuccess<Options>,
|
|
683
|
-
ExtractError<Options>,
|
|
684
|
-
AiLanguageModel | ExtractContext<Options>
|
|
685
|
-
> => Stream.unwrap(AiLanguageModel.pipe(Effect.map((_) => _.streamText(options))))
|