@effect/ai 0.14.1 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. package/AiEmbeddingModel/package.json +6 -0
  2. package/AiLanguageModel/package.json +6 -0
  3. package/AiTool/package.json +6 -0
  4. package/dist/cjs/AiChat.js +65 -86
  5. package/dist/cjs/AiChat.js.map +1 -1
  6. package/dist/cjs/{Embeddings.js → AiEmbeddingModel.js} +12 -12
  7. package/dist/cjs/AiEmbeddingModel.js.map +1 -0
  8. package/dist/cjs/AiError.js +8 -1
  9. package/dist/cjs/AiError.js.map +1 -1
  10. package/dist/cjs/AiInput.js +335 -248
  11. package/dist/cjs/AiInput.js.map +1 -1
  12. package/dist/cjs/AiLanguageModel.js +311 -0
  13. package/dist/cjs/AiLanguageModel.js.map +1 -0
  14. package/dist/cjs/AiModel.js +11 -5
  15. package/dist/cjs/AiModel.js.map +1 -1
  16. package/dist/cjs/AiPlan.js +10 -3
  17. package/dist/cjs/AiPlan.js.map +1 -1
  18. package/dist/cjs/AiResponse.js +481 -165
  19. package/dist/cjs/AiResponse.js.map +1 -1
  20. package/dist/cjs/AiTelemetry.js +10 -3
  21. package/dist/cjs/AiTelemetry.js.map +1 -1
  22. package/dist/cjs/AiTool.js +93 -0
  23. package/dist/cjs/AiTool.js.map +1 -0
  24. package/dist/cjs/AiToolkit.js +121 -98
  25. package/dist/cjs/AiToolkit.js.map +1 -1
  26. package/dist/cjs/Tokenizer.js +14 -16
  27. package/dist/cjs/Tokenizer.js.map +1 -1
  28. package/dist/cjs/index.js +7 -9
  29. package/dist/cjs/internal/aiPlan.js +6 -9
  30. package/dist/cjs/internal/aiPlan.js.map +1 -1
  31. package/dist/cjs/internal/common.js +22 -0
  32. package/dist/cjs/internal/common.js.map +1 -0
  33. package/dist/dts/AiChat.d.ts +58 -44
  34. package/dist/dts/AiChat.d.ts.map +1 -1
  35. package/dist/dts/{Embeddings.d.ts → AiEmbeddingModel.d.ts} +13 -14
  36. package/dist/dts/AiEmbeddingModel.d.ts.map +1 -0
  37. package/dist/dts/AiError.d.ts +4 -3
  38. package/dist/dts/AiError.d.ts.map +1 -1
  39. package/dist/dts/AiInput.d.ts +441 -146
  40. package/dist/dts/AiInput.d.ts.map +1 -1
  41. package/dist/dts/AiLanguageModel.d.ts +263 -0
  42. package/dist/dts/AiLanguageModel.d.ts.map +1 -0
  43. package/dist/dts/AiModel.d.ts +21 -20
  44. package/dist/dts/AiModel.d.ts.map +1 -1
  45. package/dist/dts/AiPlan.d.ts +90 -26
  46. package/dist/dts/AiPlan.d.ts.map +1 -1
  47. package/dist/dts/AiResponse.d.ts +711 -100
  48. package/dist/dts/AiResponse.d.ts.map +1 -1
  49. package/dist/dts/AiTelemetry.d.ts +175 -157
  50. package/dist/dts/AiTelemetry.d.ts.map +1 -1
  51. package/dist/dts/AiTool.d.ts +288 -0
  52. package/dist/dts/AiTool.d.ts.map +1 -0
  53. package/dist/dts/AiToolkit.d.ts +50 -111
  54. package/dist/dts/AiToolkit.d.ts.map +1 -1
  55. package/dist/dts/Tokenizer.d.ts +8 -6
  56. package/dist/dts/Tokenizer.d.ts.map +1 -1
  57. package/dist/dts/index.d.ts +8 -12
  58. package/dist/dts/index.d.ts.map +1 -1
  59. package/dist/dts/internal/common.d.ts +2 -0
  60. package/dist/dts/internal/common.d.ts.map +1 -0
  61. package/dist/esm/AiChat.js +62 -83
  62. package/dist/esm/AiChat.js.map +1 -1
  63. package/dist/esm/{Embeddings.js → AiEmbeddingModel.js} +10 -10
  64. package/dist/esm/AiEmbeddingModel.js.map +1 -0
  65. package/dist/esm/AiError.js +8 -1
  66. package/dist/esm/AiError.js.map +1 -1
  67. package/dist/esm/AiInput.js +316 -238
  68. package/dist/esm/AiInput.js.map +1 -1
  69. package/dist/esm/AiLanguageModel.js +300 -0
  70. package/dist/esm/AiLanguageModel.js.map +1 -0
  71. package/dist/esm/AiModel.js +11 -5
  72. package/dist/esm/AiModel.js.map +1 -1
  73. package/dist/esm/AiPlan.js +8 -2
  74. package/dist/esm/AiPlan.js.map +1 -1
  75. package/dist/esm/AiResponse.js +467 -162
  76. package/dist/esm/AiResponse.js.map +1 -1
  77. package/dist/esm/AiTelemetry.js +8 -2
  78. package/dist/esm/AiTelemetry.js.map +1 -1
  79. package/dist/esm/AiTool.js +82 -0
  80. package/dist/esm/AiTool.js.map +1 -0
  81. package/dist/esm/AiToolkit.js +118 -96
  82. package/dist/esm/AiToolkit.js.map +1 -1
  83. package/dist/esm/Tokenizer.js +14 -16
  84. package/dist/esm/Tokenizer.js.map +1 -1
  85. package/dist/esm/index.js +8 -12
  86. package/dist/esm/index.js.map +1 -1
  87. package/dist/esm/internal/aiPlan.js +4 -7
  88. package/dist/esm/internal/aiPlan.js.map +1 -1
  89. package/dist/esm/internal/common.js +14 -0
  90. package/dist/esm/internal/common.js.map +1 -0
  91. package/package.json +28 -36
  92. package/src/AiChat.ts +182 -207
  93. package/src/{Embeddings.ts → AiEmbeddingModel.ts} +19 -18
  94. package/src/AiError.ts +8 -1
  95. package/src/AiInput.ts +434 -313
  96. package/src/AiLanguageModel.ts +569 -0
  97. package/src/AiModel.ts +47 -29
  98. package/src/AiPlan.ts +102 -30
  99. package/src/AiResponse.ts +743 -187
  100. package/src/AiTelemetry.ts +214 -197
  101. package/src/AiTool.ts +496 -0
  102. package/src/AiToolkit.ts +200 -240
  103. package/src/Tokenizer.ts +18 -22
  104. package/src/index.ts +9 -14
  105. package/src/internal/aiPlan.ts +12 -14
  106. package/src/internal/common.ts +12 -0
  107. package/AiModels/package.json +0 -6
  108. package/AiRole/package.json +0 -6
  109. package/Completions/package.json +0 -6
  110. package/Embeddings/package.json +0 -6
  111. package/dist/cjs/AiModels.js +0 -54
  112. package/dist/cjs/AiModels.js.map +0 -1
  113. package/dist/cjs/AiRole.js +0 -106
  114. package/dist/cjs/AiRole.js.map +0 -1
  115. package/dist/cjs/Completions.js +0 -256
  116. package/dist/cjs/Completions.js.map +0 -1
  117. package/dist/cjs/Embeddings.js.map +0 -1
  118. package/dist/dts/AiModels.d.ts +0 -34
  119. package/dist/dts/AiModels.d.ts.map +0 -1
  120. package/dist/dts/AiRole.d.ts +0 -111
  121. package/dist/dts/AiRole.d.ts.map +0 -1
  122. package/dist/dts/Completions.d.ts +0 -128
  123. package/dist/dts/Completions.d.ts.map +0 -1
  124. package/dist/dts/Embeddings.d.ts.map +0 -1
  125. package/dist/esm/AiModels.js +0 -44
  126. package/dist/esm/AiModels.js.map +0 -1
  127. package/dist/esm/AiRole.js +0 -93
  128. package/dist/esm/AiRole.js.map +0 -1
  129. package/dist/esm/Completions.js +0 -245
  130. package/dist/esm/Completions.js.map +0 -1
  131. package/dist/esm/Embeddings.js.map +0 -1
  132. package/src/AiModels.ts +0 -77
  133. package/src/AiRole.ts +0 -122
  134. package/src/Completions.ts +0 -434
@@ -0,0 +1,569 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import * as Context from "effect/Context"
5
+ import * as Effect from "effect/Effect"
6
+ import * as JsonSchema from "effect/JSONSchema"
7
+ import * as Option from "effect/Option"
8
+ import * as Predicate from "effect/Predicate"
9
+ import * as Schema from "effect/Schema"
10
+ import * as AST from "effect/SchemaAST"
11
+ import * as Stream from "effect/Stream"
12
+ import type { Span } from "effect/Tracer"
13
+ import type { Concurrency, Mutable, NoExcessProperties } from "effect/Types"
14
+ import { AiError } from "./AiError.js"
15
+ import * as AiInput from "./AiInput.js"
16
+ import * as AiResponse from "./AiResponse.js"
17
+ import { CurrentSpanTransformer } from "./AiTelemetry.js"
18
+ import type * as AiTool from "./AiTool.js"
19
+ import type * as AiToolkit from "./AiToolkit.js"
20
+
21
// Shared options object passed to schema-backed constructors to skip
// re-validating data this module constructs internally.
const constDisableValidation = { disableValidation: true }
22
+
23
/**
 * Context tag for the language model service.
 *
 * Implementations are created with `make` below and provide `generateText`,
 * `streamText`, and `generateObject`.
 *
 * @since 1.0.0
 * @category Context
 */
export class AiLanguageModel extends Context.Tag("@effect/ai/AiLanguageModel")<
  AiLanguageModel,
  AiLanguageModel.Service<never>
>() {}
31
+
32
/**
 * A schema usable for structured output: one carrying either a `_tag` or an
 * `identifier` property, which `generateObject` uses (when `toolCallId` is
 * not given) to name the underlying tool call.
 *
 * @since 1.0.0
 * @category Models
 */
export type StructuredSchema<A, I, R> = TaggedSchema<A, I, R> | IdentifiedSchema<A, I, R>

/**
 * A schema whose `_tag` property names the structured output.
 *
 * @since 1.0.0
 * @category Models
 */
export interface TaggedSchema<A, I, R> extends Schema.Schema<A, I, R> {
  readonly _tag: string
}

/**
 * A schema whose `identifier` property names the structured output.
 *
 * @since 1.0.0
 * @category Models
 */
export interface IdentifiedSchema<A, I, R> extends Schema.Schema<A, I, R> {
  readonly identifier: string
}

/**
 * The tool choice mode for the language model.
 *
 * - `auto` (default): The model can decide whether or not to call tools, as well as which tools to call.
 * - `required`: The model **must** call a tool but can decide which tool will be called.
 * - `none`: The model **must not** call a tool.
 * - `{ tool: <tool_name> }`: The model must call the specified tool.
 *
 * @since 1.0.0
 * @category Models
 */
export type ToolChoice<Tool extends AiTool.Any> = "auto" | "none" | "required" | {
  readonly tool: Tool["name"]
}
68
+
69
/**
 * Options for generating text using a large language model.
 *
 * @since 1.0.0
 * @category Models
 */
export interface GenerateTextOptions<Tools extends AiTool.Any> {
  /**
   * The prompt input to use to generate text.
   */
  readonly prompt: AiInput.Raw

  /**
   * An optional system message that will be part of the prompt.
   */
  readonly system?: string | undefined

  /**
   * A toolkit containing both the tools and the tool call handler to use to
   * augment text generation. May be supplied directly or as an effect that
   * resolves to the toolkit; in the latter case the effect's error and
   * context types surface via `ExtractError` / `ExtractContext`.
   */
  readonly toolkit?: AiToolkit.ToHandler<Tools> | Effect.Effect<AiToolkit.ToHandler<Tools>, any, any>

  /**
   * The tool choice mode for the language model.
   *
   * - `auto` (default): The model can decide whether or not to call tools, as well as which tools to call.
   * - `required`: The model **must** call a tool but can decide which tool will be called.
   * - `none`: The model **must not** call a tool.
   * - `{ tool: <tool_name> }`: The model must call the specified tool.
   */
  readonly toolChoice?: ToolChoice<Tools>

  /**
   * The concurrency level for resolving tool calls.
   */
  readonly concurrency?: Concurrency | undefined
}
107
+
108
/**
 * Options for generating a structured object using a large language model.
 *
 * @since 1.0.0
 * @category Models
 */
export interface GenerateObjectOptions<A, I, R> {
  /**
   * The prompt input to use to generate text.
   */
  readonly prompt: AiInput.Raw

  /**
   * An optional system message that will be part of the prompt.
   */
  readonly system?: string | undefined

  /**
   * The schema to be used to specify the structure of the object to generate.
   */
  readonly schema: Schema.Schema<A, I, R>

  /**
   * The identifier used to associate the underlying tool call with the
   * generated output. When omitted, the schema's `_tag` or `identifier`
   * property is used, falling back to `"generateObject"`.
   */
  readonly toolCallId?: string | undefined
}
136
+
137
/**
 * A utility type to extract the success type for the text generation methods
 * of `AiLanguageModel` from the provided options.
 *
 * With a toolkit (direct or effectful) the result is
 * `AiResponse.WithToolCallResults`; otherwise a plain `AiResponse`.
 *
 * @since 1.0.0
 * @category Utility Types
 */
export type ExtractSuccess<Options> = Options extends {
  toolkit: AiToolkit.ToHandler<infer _Tools>
} ? AiResponse.WithToolCallResults<_Tools>
  : Options extends {
    toolkit: Effect.Effect<AiToolkit.ToHandler<infer _Tools>, infer _E, infer _R>
  } ? AiResponse.WithToolCallResults<_Tools>
  : AiResponse.AiResponse

/**
 * A utility type to extract the error type for the text generation methods
 * of `AiLanguageModel` from the provided options.
 *
 * Tool failures are added when a toolkit is present; an effectful toolkit
 * additionally contributes its own error type.
 *
 * @since 1.0.0
 * @category Utility Types
 */
export type ExtractError<Options> = Options extends {
  toolkit: AiToolkit.ToHandler<infer _Tools>
} ? AiError | AiTool.Failure<_Tools>
  : Options extends {
    toolkit: Effect.Effect<AiToolkit.ToHandler<infer _Tools>, infer _E, infer _R>
  } ? AiError | AiTool.Failure<_Tools> | _E
  : AiError

/**
 * A utility type to extract the context type for the text generation methods
 * of `AiLanguageModel` from the provided options.
 *
 * Tool context is added when a toolkit is present; an effectful toolkit
 * additionally contributes its own context requirements.
 *
 * @since 1.0.0
 * @category Utility Types
 */
export type ExtractContext<Options> = Options extends {
  toolkit: AiToolkit.ToHandler<infer _Tools>
} ? AiTool.Context<_Tools>
  : Options extends {
    toolkit: Effect.Effect<AiToolkit.ToHandler<infer _Tools>, infer _E, infer _R>
  } ? AiTool.Context<_Tools> | _R
  : never
181
+
182
/**
 * @since 1.0.0
 * @category Models
 */
export declare namespace AiLanguageModel {
  /**
   * The interface implemented by language-model providers.
   *
   * `Config` is extra context required by every method of a particular
   * provider implementation (it appears in each method's context type);
   * the generic `AiLanguageModel` tag uses `Service<never>`.
   *
   * @since 1.0.0
   * @category Models
   */
  export interface Service<Config> {
    /**
     * Generate text using a large language model for the specified `prompt`.
     *
     * If a `toolkit` is specified, the large language model will additionally
     * be able to perform tool calls to augment its response.
     */
    readonly generateText: <Tools extends AiTool.Any, Options>(
      options: Options & GenerateTextOptions<Tools>
    ) => Effect.Effect<
      ExtractSuccess<Options>,
      ExtractError<Options>,
      ExtractContext<Options> | Config
    >
    /**
     * Generate text using a large language model for the specified `prompt`,
     * streaming output from the model as soon as it is available.
     *
     * If a `toolkit` is specified, the large language model will additionally
     * be able to perform tool calls to augment its response.
     */
    readonly streamText: <Tools extends AiTool.Any, Options>(
      options: Options & GenerateTextOptions<Tools>
    ) => Stream.Stream<
      ExtractSuccess<Options>,
      ExtractError<Options>,
      ExtractContext<Options> | Config
    >

    /**
     * Generate a structured object for the specified prompt and schema using a
     * large language model.
     */
    readonly generateObject: <A, I, R>(
      options: GenerateObjectOptions<A, I, R>
    ) => Effect.Effect<AiResponse.WithStructuredOutput<A>, AiError, R | Config>
  }
}
229
+
230
// Shared empty map used for responses that contain no tool calls, avoiding a
// fresh Map allocation per response. Never mutated.
const constEmptyMap = new Map<never, never>()
231
+
232
/**
 * The fully-resolved options handed to a provider's `generateText` /
 * `streamText` implementation by `make` (prompt parsed, system message as an
 * `Option`, tools pre-encoded, span created).
 *
 * @since 1.0.0
 * @category Models
 */
export interface AiLanguageModelOptions {
  /**
   * The prompt messages to use to generate text.
   */
  readonly prompt: AiInput.AiInput
  /**
   * An optional system message that will be part of the prompt.
   */
  readonly system: Option.Option<string>
  /**
   * The tools to use to generate text in an encoded format suitable for
   * incorporation into requests to the large language model.
   */
  readonly tools: Array<{
    readonly name: string
    readonly description: string
    readonly parameters: JsonSchema.JsonSchema7
    // `true` only for the synthetic tool generated by `generateObject` to
    // capture structured output; `false` for regular toolkit tools.
    readonly structured: boolean
  }>
  /**
   * The tool choice mode for the language model.
   *
   * - `auto` (default): The model can decide whether or not to call tools, as well as which tools to call.
   * - `required`: The model **must** call a tool but can decide which tool will be called.
   * - `none`: The model **must not** call a tool.
   * - `{ tool: <tool_name> }`: The model must call the specified tool.
   */
  readonly toolChoice: ToolChoice<any>
  /**
   * The span to use to trace interactions with the large language model.
   */
  readonly span: Span
}
269
+
270
/**
 * Builds an `AiLanguageModel.Service` from provider-specific `generateText`
 * and `streamText` implementations, layering on tracing spans, toolkit
 * handling, tool-call resolution, and structured-output decoding.
 *
 * @since 1.0.0
 * @category Constructors
 */
export const make: <Config>(
  opts: {
    readonly generateText: (options: AiLanguageModelOptions) => Effect.Effect<AiResponse.AiResponse, AiError, Config>
    readonly streamText: (options: AiLanguageModelOptions) => Stream.Stream<AiResponse.AiResponse, AiError, Config>
  }
) => Effect.Effect<
  AiLanguageModel.Service<Config>
> = Effect.fnUntraced(function*<Config>(opts: {
  readonly generateText: (options: AiLanguageModelOptions) => Effect.Effect<AiResponse.AiResponse, AiError, Config>
  readonly streamText: (options: AiLanguageModelOptions) => Stream.Stream<AiResponse.AiResponse, AiError, Config>
}) {
  // Capture any span transformer available at construction time; at call
  // time, prefer one from the caller's context and fall back to this one.
  const parentSpanTransformer = yield* Effect.serviceOption(CurrentSpanTransformer)
  const getSpanTransformer = Effect.serviceOption(CurrentSpanTransformer).pipe(
    Effect.map(Option.orElse(() => parentSpanTransformer))
  )

  const generateText = <
    Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
  >({ concurrency, toolChoice = "auto", toolkit, ...options }: Options): Effect.Effect<
    ExtractSuccess<Options>,
    ExtractError<Options>,
    ExtractContext<Options> | Config
  > =>
    Effect.useSpan(
      "AiLanguageModel.generateText",
      { captureStackTrace: false, attributes: { concurrency, toolChoice } },
      Effect.fnUntraced(function*(span) {
        const prompt = AiInput.make(options.prompt)
        const system = Option.fromNullable(options.system)
        const spanTransformer = yield* getSpanTransformer
        // Start with tools disabled; they are enabled below only when a
        // toolkit was actually provided.
        const modelOptions: Mutable<AiLanguageModelOptions> = { prompt, system, tools: [], toolChoice: "none", span }
        if (Predicate.isUndefined(toolkit)) {
          // No toolkit: single provider call, no tool-call resolution.
          const response = yield* opts.generateText(modelOptions)
          if (Option.isSome(spanTransformer)) {
            spanTransformer.value({ ...modelOptions, response })
          }
          return response
        }
        modelOptions.toolChoice = toolChoice
        // The toolkit may be supplied directly or as an effect producing it.
        const actualToolkit = Effect.isEffect(toolkit) ? yield* toolkit : toolkit
        for (const tool of actualToolkit.tools) {
          modelOptions.tools.push(convertTool(tool))
        }
        const response = yield* opts.generateText(modelOptions)
        if (Option.isSome(spanTransformer)) {
          spanTransformer.value({ ...modelOptions, response })
        }
        // Execute any tool calls in the response before returning.
        return yield* resolveParts({ response, toolkit: actualToolkit, concurrency, method: "generateText" })
      }, (effect, span) => Effect.withParentSpan(effect, span))
    ) as any

  const streamText = Effect.fnUntraced(
    function*<
      Options extends NoExcessProperties<GenerateTextOptions<any>, Options>
    >({ concurrency, toolChoice = "auto", toolkit, ...options }: Options) {
      // Scoped span: closed when the consuming stream's scope closes.
      const span = yield* Effect.makeSpanScoped("AiLanguageModel.streamText", {
        captureStackTrace: false,
        attributes: { concurrency, toolChoice }
      })
      const prompt = AiInput.make(options.prompt)
      const system = Option.fromNullable(options.system)
      const modelOptions: Mutable<AiLanguageModelOptions> = { prompt, system, tools: [], toolChoice: "none", span }
      if (Predicate.isUndefined(toolkit)) {
        // Returns [stream, options] so the second stage can attach the
        // span transformer without re-deriving the options.
        return [opts.streamText(modelOptions), modelOptions] as const
      }
      modelOptions.toolChoice = toolChoice
      const actualToolkit = Effect.isEffect(toolkit)
        ? yield* (toolkit as Effect.Effect<AiToolkit.ToHandler<any>>)
        : toolkit
      for (const tool of actualToolkit.tools) {
        modelOptions.tools.push(convertTool(tool))
      }
      return [
        opts.streamText(modelOptions).pipe(
          // Resolve tool calls per emitted chunk as soon as they arrive.
          Stream.mapEffect(
            (response) => resolveParts({ response, toolkit: actualToolkit, concurrency, method: "streamText" }),
            { concurrency: "unbounded" }
          )
        ) as Stream.Stream<AiResponse.AiResponse, AiError, Config>,
        modelOptions
      ] as const
    },
    Effect.flatMap(Effect.fnUntraced(function*([stream, options]) {
      const spanTransformer = yield* getSpanTransformer
      if (Option.isNone(spanTransformer)) {
        return stream
      }
      // Track the most recent chunk so the transformer sees the final
      // response when the stream finishes.
      let lastResponse: AiResponse.AiResponse | undefined
      return stream.pipe(
        Stream.map((response) => {
          lastResponse = response
          return response
        }),
        Stream.ensuring(Effect.sync(() => {
          // NOTE(review): if the stream ends without emitting any chunk,
          // `lastResponse` is still undefined here despite the `!` — confirm
          // transformers tolerate that.
          spanTransformer.value({ ...options, response: lastResponse! })
        }))
      )
    })),
    Stream.unwrapScoped
  )

  const generateObject = <A, I, R>(
    options: GenerateObjectOptions<A, I, R>
  ): Effect.Effect<AiResponse.WithStructuredOutput<A>, AiError, R | Config> => {
    // Derive the tool-call name: explicit option, then schema `_tag`, then
    // schema `identifier`, then a generic fallback.
    const toolCallId: string = options.toolCallId
      ? options.toolCallId
      : "_tag" in options.schema
      ? options.schema._tag as string
      : "identifier" in options.schema
      ? options.schema.identifier as string
      : "generateObject"
    return Effect.useSpan(
      "AiLanguageModel.generateObject",
      {
        captureStackTrace: false,
        attributes: { toolCallId }
      },
      Effect.fnUntraced(function*(span) {
        const prompt = AiInput.make(options.prompt)
        const system = Option.fromNullable(options.system)
        const spanTransformer = yield* getSpanTransformer
        const decode = Schema.decodeUnknown(options.schema)
        // Present the schema as a single synthetic tool and force the model
        // to call exactly that tool.
        const tool = convertStructured(toolCallId, options.schema)
        const toolChoice = { tool: tool.name } as const
        const modelOptions: AiLanguageModelOptions = { prompt, system, tools: [tool], toolChoice, span }
        const response = yield* opts.generateText(modelOptions)
        if (Option.isSome(spanTransformer)) {
          spanTransformer.value({ ...modelOptions, response })
        }
        const toolCallPart = response.parts.find((part): part is AiResponse.ToolCallPart =>
          part._tag === "ToolCallPart" && part.name === toolCallId
        )
        if (Predicate.isUndefined(toolCallPart)) {
          return yield* new AiError({
            module: "AiLanguageModel",
            method: "generateObject",
            description: `Tool call '${toolCallId}' not found in model response`
          })
        }
        // Decode the tool-call parameters into the requested structure.
        return yield* Effect.matchEffect(decode(toolCallPart.params), {
          onFailure: (cause) =>
            new AiError({
              module: "AiLanguageModel",
              method: "generateObject",
              description: `Failed to decode tool call '${toolCallId}' parameters`,
              cause
            }),
          onSuccess: (output) =>
            Effect.succeed(
              new AiResponse.WithStructuredOutput({
                parts: response.parts,
                id: toolCallPart.id,
                name: toolCallPart.name,
                value: output
              }, constDisableValidation)
            )
        })
      }, (effect, span) => Effect.withParentSpan(effect, span))
    )
  }

  return AiLanguageModel.of({ generateText, streamText, generateObject } as any)
})
437
+
438
+ const convertTool = <Tool extends AiTool.Any>(tool: Tool) => ({
439
+ name: tool.name,
440
+ description: tool.description ?? getDescription(tool.parametersSchema.ast),
441
+ parameters: makeJsonSchema(tool.parametersSchema.ast),
442
+ structured: false
443
+ })
444
+
445
+ const convertStructured = <A, I, R>(name: string, schema: Schema.Schema<A, I, R>) => ({
446
+ name,
447
+ description: getDescription(schema.ast),
448
+ parameters: makeJsonSchema(schema.ast),
449
+ structured: true
450
+ })
451
+
452
+ const makeJsonSchema = (ast: AST.AST): JsonSchema.JsonSchema7 => {
453
+ const $defs = {}
454
+ const schema = JsonSchema.fromAST(ast, {
455
+ definitions: $defs,
456
+ topLevelReferenceStrategy: "skip"
457
+ })
458
+ if (Object.keys($defs).length === 0) return schema
459
+ ;(schema as any).$defs = $defs
460
+ return schema
461
+ }
462
+
463
+ const getDescription = (ast: AST.AST): string => {
464
+ const annotations = ast._tag === "Transformation" ?
465
+ {
466
+ ...ast.to.annotations,
467
+ ...ast.annotations
468
+ } :
469
+ ast.annotations
470
+ return AST.DescriptionAnnotationId in annotations ? annotations[AST.DescriptionAnnotationId] as string : ""
471
+ }
472
+
473
+ const resolveParts = <Tools extends AiTool.Any>(options: {
474
+ readonly response: AiResponse.AiResponse
475
+ readonly toolkit: AiToolkit.ToHandler<Tools>
476
+ readonly concurrency: Concurrency | undefined
477
+ readonly method: string
478
+ }) =>
479
+ Effect.gen(function*() {
480
+ const toolNames: Array<string> = []
481
+ const toolParts = options.response.parts.filter(
482
+ (part): part is AiResponse.ToolCallPart => {
483
+ if (part._tag === "ToolCallPart") {
484
+ toolNames.push(part.name)
485
+ return true
486
+ }
487
+ return false
488
+ }
489
+ )
490
+ if (toolParts.length === 0) {
491
+ return new AiResponse.WithToolCallResults({
492
+ parts: options.response.parts,
493
+ results: constEmptyMap,
494
+ encodedResults: constEmptyMap
495
+ }, constDisableValidation)
496
+ }
497
+ yield* Effect.annotateCurrentSpan("toolCalls", toolNames)
498
+ const results = new Map<AiResponse.ToolCallId, AiTool.Success<Tools>>()
499
+ const encodedResults = new Map<AiResponse.ToolCallId, unknown>()
500
+ yield* Effect.forEach(toolParts, (part) => {
501
+ const id = part.id as AiResponse.ToolCallId
502
+ const name = part.name as AiTool.Name<Tools>
503
+ const params = part.params as AiTool.Parameters<Tools>
504
+ const toolCall = options.toolkit.handle(name, params)
505
+ return Effect.map(toolCall, ({ encodedResult, result }) => {
506
+ results.set(id, result)
507
+ encodedResults.set(id, encodedResult)
508
+ })
509
+ }, { concurrency: options.concurrency, discard: true })
510
+ return new AiResponse.WithToolCallResults({
511
+ parts: options.response.parts,
512
+ results,
513
+ encodedResults
514
+ }, constDisableValidation)
515
+ })
516
+
517
/**
 * Generate text using a large language model for the specified `prompt`.
 *
 * If a `toolkit` is specified, the large language model will additionally
 * be able to perform tool calls to augment its response.
 *
 * Requires the `AiLanguageModel` service; delegates to its `generateText`.
 *
 * @since 1.0.0
 * @category Functions
 */
export const generateText: <Tools extends AiTool.Any, Options>(
  options: Options & GenerateTextOptions<Tools>
) => Effect.Effect<
  ExtractSuccess<Options>,
  ExtractError<Options>,
  AiLanguageModel | ExtractContext<Options>
> = Effect.serviceFunctionEffect(AiLanguageModel, (_) => _.generateText)
533
+
534
/**
 * Generate a structured object for the specified prompt and schema using a
 * large language model.
 *
 * When using a `Schema` that does not have an `identifier` or `_tag`
 * property, you must specify a `toolCallId` to properly associate the
 * output of the model.
 *
 * Requires the `AiLanguageModel` service; delegates to its `generateObject`.
 *
 * @since 1.0.0
 * @category Functions
 */
export const generateObject: <A, I, R>(
  options: GenerateObjectOptions<A, I, R>
) => Effect.Effect<
  AiResponse.WithStructuredOutput<A>,
  AiError,
  AiLanguageModel | R
> = Effect.serviceFunctionEffect(AiLanguageModel, (_) => _.generateObject)
552
+
553
+ /**
554
+ * Generate text using a large language model for the specified `prompt`,
555
+ * streaming output from the model as soon as it is available.
556
+ *
557
+ * If a `toolkit` is specified, the large language model will additionally
558
+ * be able to perform tool calls to augment its response.
559
+ *
560
+ * @since 1.0.0
561
+ * @category Functions
562
+ */
563
+ export const streamText = <Tools extends AiTool.Any, Options>(
564
+ options: Options & GenerateTextOptions<Tools>
565
+ ): Stream.Stream<
566
+ ExtractSuccess<Options>,
567
+ ExtractError<Options>,
568
+ AiLanguageModel | ExtractContext<Options>
569
+ > => Stream.unwrap(AiLanguageModel.pipe(Effect.map((_) => _.streamText(options))))
package/src/AiModel.ts CHANGED
@@ -2,8 +2,10 @@
2
2
  * @since 1.0.0
3
3
  */
4
4
  import type * as Context from "effect/Context"
5
- import type * as Effect from "effect/Effect"
5
+ import * as Effect from "effect/Effect"
6
+ import * as GlobalValue from "effect/GlobalValue"
6
7
  import * as Option from "effect/Option"
8
+ import * as Predicate from "effect/Predicate"
7
9
  import type * as Scope from "effect/Scope"
8
10
  import type * as AiPlan from "./AiPlan.js"
9
11
  import * as InternalAiPlan from "./internal/aiPlan.js"
@@ -38,50 +40,66 @@ export type PlanTypeId = typeof TypeId
38
40
  */
39
41
  export interface AiModel<in out Provides, in out Requires> extends AiPlan.AiPlan<unknown, Provides, Requires> {
40
42
  readonly [TypeId]: TypeId
41
- readonly model: string
42
- readonly cacheKey: symbol
43
- readonly requires: Context.Tag<Requires, any>
44
- readonly provides: AiModel.ContextBuilder<Provides, Requires>
45
- readonly updateContext: (context: Context.Context<Provides>) => Context.Context<Provides>
43
+ readonly buildContext: ContextBuilder<Provides, Requires>
46
44
  }
47
45
 
48
46
  /**
49
47
  * @since 1.0.0
48
+ * @category AiModel
50
49
  */
51
- export declare namespace AiModel {
52
- /**
53
- * @since 1.0.0
54
- * @category AiModel
55
- */
56
- export type ContextBuilder<Provides, Requires> = Effect.Effect<
57
- Context.Context<Provides>,
58
- never,
59
- Requires | Scope.Scope
60
- >
61
- }
50
+ export type ContextBuilder<Provides, Requires> = Effect.Effect<
51
+ Context.Context<Provides>,
52
+ never,
53
+ Requires | Scope.Scope
54
+ >
62
55
 
63
56
  const AiModelProto = {
64
57
  ...InternalAiPlan.PlanPrototype,
65
58
  [TypeId]: TypeId
66
59
  }
67
60
 
61
+ const contextCache = GlobalValue.globalValue(
62
+ "@effect/ai/AiModel/CachedContexts",
63
+ () => new Map<string, any>()
64
+ )
65
+
68
66
  /**
69
67
  * @since 1.0.0
70
68
  * @category constructors
71
69
  */
72
- export const make = <Provides, Requires>(options: {
73
- readonly model: string
74
- readonly cacheKey: symbol
75
- readonly requires: Context.Tag<Requires, any>
76
- readonly provides: AiModel.ContextBuilder<Provides, Requires>
77
- readonly updateContext: (context: Context.Context<Provides>) => Context.Context<Provides>
78
- }): AiModel<Provides, Requires> => {
70
+ export const make = <Cached, PerRequest, CachedRequires, PerRequestRequires>(options: {
71
+ /**
72
+ * A unique key used to cache the `Context` built from the `cachedContext`
73
+ * effect.
74
+ */
75
+ readonly cacheKey: string
76
+ /**
77
+ * An effect used to build a `Context` that will be cached after creation
78
+ * and used for all provider requests.
79
+ */
80
+ readonly cachedContext: Effect.Effect<
81
+ Context.Context<Cached>,
82
+ never,
83
+ CachedRequires | Scope.Scope
84
+ >
85
+ /**
86
+ * A method that can be used to update the `Context` on a per-request basis
87
+ * for all provider requests.
88
+ */
89
+ readonly updateRequestContext: (context: Context.Context<Cached>) => Effect.Effect<
90
+ Context.Context<PerRequest>,
91
+ never,
92
+ PerRequestRequires
93
+ >
94
+ }): AiModel<Cached | PerRequest, CachedRequires | PerRequestRequires> => {
79
95
  const self = Object.create(AiModelProto)
80
- self.cacheKey = options.cacheKey
81
- self.model = options.model
82
- self.provides = options.provides
83
- self.requires = options.requires
84
- self.updateContext = options.updateContext
96
+ self.buildContext = Effect.gen(function*() {
97
+ let context = contextCache.get(options.cacheKey)
98
+ if (Predicate.isUndefined(context)) {
99
+ context = yield* options.cachedContext
100
+ }
101
+ return yield* options.updateRequestContext(context)
102
+ })
85
103
  self.steps = [{
86
104
  model: self,
87
105
  check: Option.none(),