@effect/ai 0.26.0 → 0.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Chat/package.json +6 -0
- package/EmbeddingModel/package.json +6 -0
- package/IdGenerator/package.json +6 -0
- package/LanguageModel/package.json +6 -0
- package/Model/package.json +6 -0
- package/Prompt/package.json +6 -0
- package/Response/package.json +6 -0
- package/Telemetry/package.json +6 -0
- package/Tool/package.json +6 -0
- package/Toolkit/package.json +6 -0
- package/dist/cjs/AiError.js +575 -11
- package/dist/cjs/AiError.js.map +1 -1
- package/dist/cjs/Chat.js +302 -0
- package/dist/cjs/Chat.js.map +1 -0
- package/dist/cjs/EmbeddingModel.js +184 -0
- package/dist/cjs/EmbeddingModel.js.map +1 -0
- package/dist/cjs/IdGenerator.js +255 -0
- package/dist/cjs/IdGenerator.js.map +1 -0
- package/dist/cjs/LanguageModel.js +584 -0
- package/dist/cjs/LanguageModel.js.map +1 -0
- package/dist/cjs/McpServer.js +12 -4
- package/dist/cjs/McpServer.js.map +1 -1
- package/dist/cjs/Model.js +118 -0
- package/dist/cjs/Model.js.map +1 -0
- package/dist/cjs/Prompt.js +649 -0
- package/dist/cjs/Prompt.js.map +1 -0
- package/dist/cjs/Response.js +635 -0
- package/dist/cjs/Response.js.map +1 -0
- package/dist/cjs/Telemetry.js +176 -0
- package/dist/cjs/Telemetry.js.map +1 -0
- package/dist/cjs/Tokenizer.js +87 -8
- package/dist/cjs/Tokenizer.js.map +1 -1
- package/dist/cjs/Tool.js +556 -0
- package/dist/cjs/Tool.js.map +1 -0
- package/dist/cjs/Toolkit.js +279 -0
- package/dist/cjs/Toolkit.js.map +1 -0
- package/dist/cjs/index.js +21 -19
- package/dist/dts/AiError.d.ts +577 -9
- package/dist/dts/AiError.d.ts.map +1 -1
- package/dist/dts/Chat.d.ts +356 -0
- package/dist/dts/Chat.d.ts.map +1 -0
- package/dist/dts/EmbeddingModel.d.ts +153 -0
- package/dist/dts/EmbeddingModel.d.ts.map +1 -0
- package/dist/dts/IdGenerator.d.ts +272 -0
- package/dist/dts/IdGenerator.d.ts.map +1 -0
- package/dist/dts/LanguageModel.d.ts +458 -0
- package/dist/dts/LanguageModel.d.ts.map +1 -0
- package/dist/dts/McpSchema.d.ts +25 -25
- package/dist/dts/McpServer.d.ts +6 -4
- package/dist/dts/McpServer.d.ts.map +1 -1
- package/dist/dts/Model.d.ts +124 -0
- package/dist/dts/Model.d.ts.map +1 -0
- package/dist/dts/Prompt.d.ts +1119 -0
- package/dist/dts/Prompt.d.ts.map +1 -0
- package/dist/dts/Response.d.ts +1519 -0
- package/dist/dts/Response.d.ts.map +1 -0
- package/dist/dts/Telemetry.d.ts +520 -0
- package/dist/dts/Telemetry.d.ts.map +1 -0
- package/dist/dts/Tokenizer.d.ts +131 -13
- package/dist/dts/Tokenizer.d.ts.map +1 -1
- package/dist/dts/Tool.d.ts +876 -0
- package/dist/dts/Tool.d.ts.map +1 -0
- package/dist/dts/Toolkit.d.ts +310 -0
- package/dist/dts/Toolkit.d.ts.map +1 -0
- package/dist/dts/index.d.ts +498 -13
- package/dist/dts/index.d.ts.map +1 -1
- package/dist/esm/AiError.js +570 -10
- package/dist/esm/AiError.js.map +1 -1
- package/dist/esm/Chat.js +291 -0
- package/dist/esm/Chat.js.map +1 -0
- package/dist/esm/EmbeddingModel.js +173 -0
- package/dist/esm/EmbeddingModel.js.map +1 -0
- package/dist/esm/IdGenerator.js +245 -0
- package/dist/esm/IdGenerator.js.map +1 -0
- package/dist/esm/LanguageModel.js +572 -0
- package/dist/esm/LanguageModel.js.map +1 -0
- package/dist/esm/McpServer.js +12 -4
- package/dist/esm/McpServer.js.map +1 -1
- package/dist/esm/Model.js +108 -0
- package/dist/esm/Model.js.map +1 -0
- package/dist/esm/Prompt.js +633 -0
- package/dist/esm/Prompt.js.map +1 -0
- package/dist/esm/Response.js +619 -0
- package/dist/esm/Response.js.map +1 -0
- package/dist/esm/Telemetry.js +166 -0
- package/dist/esm/Telemetry.js.map +1 -0
- package/dist/esm/Tokenizer.js +87 -8
- package/dist/esm/Tokenizer.js.map +1 -1
- package/dist/esm/Tool.js +534 -0
- package/dist/esm/Tool.js.map +1 -0
- package/dist/esm/Toolkit.js +269 -0
- package/dist/esm/Toolkit.js.map +1 -0
- package/dist/esm/index.js +498 -13
- package/dist/esm/index.js.map +1 -1
- package/package.json +76 -68
- package/src/AiError.ts +739 -9
- package/src/Chat.ts +546 -0
- package/src/EmbeddingModel.ts +311 -0
- package/src/IdGenerator.ts +320 -0
- package/src/LanguageModel.ts +1074 -0
- package/src/McpServer.ts +337 -194
- package/src/Model.ts +155 -0
- package/src/Prompt.ts +1616 -0
- package/src/Response.ts +2131 -0
- package/src/Telemetry.ts +655 -0
- package/src/Tokenizer.ts +145 -24
- package/src/Tool.ts +1267 -0
- package/src/Toolkit.ts +516 -0
- package/src/index.ts +499 -13
- package/AiChat/package.json +0 -6
- package/AiEmbeddingModel/package.json +0 -6
- package/AiInput/package.json +0 -6
- package/AiLanguageModel/package.json +0 -6
- package/AiModel/package.json +0 -6
- package/AiResponse/package.json +0 -6
- package/AiTelemetry/package.json +0 -6
- package/AiTool/package.json +0 -6
- package/AiToolkit/package.json +0 -6
- package/dist/cjs/AiChat.js +0 -122
- package/dist/cjs/AiChat.js.map +0 -1
- package/dist/cjs/AiEmbeddingModel.js +0 -109
- package/dist/cjs/AiEmbeddingModel.js.map +0 -1
- package/dist/cjs/AiInput.js +0 -458
- package/dist/cjs/AiInput.js.map +0 -1
- package/dist/cjs/AiLanguageModel.js +0 -351
- package/dist/cjs/AiLanguageModel.js.map +0 -1
- package/dist/cjs/AiModel.js +0 -37
- package/dist/cjs/AiModel.js.map +0 -1
- package/dist/cjs/AiResponse.js +0 -681
- package/dist/cjs/AiResponse.js.map +0 -1
- package/dist/cjs/AiTelemetry.js +0 -58
- package/dist/cjs/AiTelemetry.js.map +0 -1
- package/dist/cjs/AiTool.js +0 -150
- package/dist/cjs/AiTool.js.map +0 -1
- package/dist/cjs/AiToolkit.js +0 -157
- package/dist/cjs/AiToolkit.js.map +0 -1
- package/dist/cjs/internal/common.js +0 -21
- package/dist/cjs/internal/common.js.map +0 -1
- package/dist/dts/AiChat.d.ts +0 -101
- package/dist/dts/AiChat.d.ts.map +0 -1
- package/dist/dts/AiEmbeddingModel.d.ts +0 -65
- package/dist/dts/AiEmbeddingModel.d.ts.map +0 -1
- package/dist/dts/AiInput.d.ts +0 -590
- package/dist/dts/AiInput.d.ts.map +0 -1
- package/dist/dts/AiLanguageModel.d.ts +0 -302
- package/dist/dts/AiLanguageModel.d.ts.map +0 -1
- package/dist/dts/AiModel.d.ts +0 -25
- package/dist/dts/AiModel.d.ts.map +0 -1
- package/dist/dts/AiResponse.d.ts +0 -863
- package/dist/dts/AiResponse.d.ts.map +0 -1
- package/dist/dts/AiTelemetry.d.ts +0 -242
- package/dist/dts/AiTelemetry.d.ts.map +0 -1
- package/dist/dts/AiTool.d.ts +0 -334
- package/dist/dts/AiTool.d.ts.map +0 -1
- package/dist/dts/AiToolkit.d.ts +0 -96
- package/dist/dts/AiToolkit.d.ts.map +0 -1
- package/dist/dts/internal/common.d.ts +0 -2
- package/dist/dts/internal/common.d.ts.map +0 -1
- package/dist/esm/AiChat.js +0 -111
- package/dist/esm/AiChat.js.map +0 -1
- package/dist/esm/AiEmbeddingModel.js +0 -98
- package/dist/esm/AiEmbeddingModel.js.map +0 -1
- package/dist/esm/AiInput.js +0 -433
- package/dist/esm/AiInput.js.map +0 -1
- package/dist/esm/AiLanguageModel.js +0 -340
- package/dist/esm/AiLanguageModel.js.map +0 -1
- package/dist/esm/AiModel.js +0 -29
- package/dist/esm/AiModel.js.map +0 -1
- package/dist/esm/AiResponse.js +0 -657
- package/dist/esm/AiResponse.js.map +0 -1
- package/dist/esm/AiTelemetry.js +0 -48
- package/dist/esm/AiTelemetry.js.map +0 -1
- package/dist/esm/AiTool.js +0 -134
- package/dist/esm/AiTool.js.map +0 -1
- package/dist/esm/AiToolkit.js +0 -147
- package/dist/esm/AiToolkit.js.map +0 -1
- package/dist/esm/internal/common.js +0 -14
- package/dist/esm/internal/common.js.map +0 -1
- package/src/AiChat.ts +0 -251
- package/src/AiEmbeddingModel.ts +0 -169
- package/src/AiInput.ts +0 -602
- package/src/AiLanguageModel.ts +0 -685
- package/src/AiModel.ts +0 -53
- package/src/AiResponse.ts +0 -986
- package/src/AiTelemetry.ts +0 -333
- package/src/AiTool.ts +0 -579
- package/src/AiToolkit.ts +0 -265
- package/src/internal/common.ts +0 -12
package/src/Chat.ts
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* The `Chat` module provides a stateful conversation interface for AI language
|
|
3
|
+
* models.
|
|
4
|
+
*
|
|
5
|
+
* This module enables persistent chat sessions that maintain conversation
|
|
6
|
+
* history, support tool calling, and offer both streaming and non-streaming
|
|
7
|
+
* text generation. It integrates seamlessly with the Effect AI ecosystem,
|
|
8
|
+
* providing type-safe conversational AI capabilities.
|
|
9
|
+
*
|
|
10
|
+
* @example
|
|
11
|
+
* ```ts
|
|
12
|
+
* import { Chat, LanguageModel } from "@effect/ai"
|
|
13
|
+
* import { Effect, Layer } from "effect"
|
|
14
|
+
*
|
|
15
|
+
* // Create a new chat session
|
|
16
|
+
* const program = Effect.gen(function* () {
|
|
17
|
+
* const chat = yield* Chat.empty
|
|
18
|
+
*
|
|
19
|
+
* // Send a message and get response
|
|
20
|
+
* const response = yield* chat.generateText({
|
|
21
|
+
* prompt: "Hello! What can you help me with?"
|
|
22
|
+
* })
|
|
23
|
+
*
|
|
24
|
+
* console.log(response.content)
|
|
25
|
+
*
|
|
26
|
+
* return response
|
|
27
|
+
* })
|
|
28
|
+
* ```
|
|
29
|
+
*
|
|
30
|
+
* @example
|
|
31
|
+
* ```ts
|
|
32
|
+
* import { Chat, LanguageModel } from "@effect/ai"
|
|
33
|
+
* import { Effect, Stream } from "effect"
|
|
34
|
+
*
|
|
35
|
+
* // Streaming chat with tool support
|
|
36
|
+
* const streamingChat = Effect.gen(function* () {
|
|
37
|
+
* const chat = yield* Chat.empty
|
|
38
|
+
*
|
|
39
|
+
* yield* chat.streamText({
|
|
40
|
+
* prompt: "Generate a creative story"
|
|
41
|
+
* }).pipe(Stream.runForEach((part) =>
|
|
42
|
+
* Effect.sync(() => console.log(part))
|
|
43
|
+
* ))
|
|
44
|
+
* })
|
|
45
|
+
* ```
|
|
46
|
+
*
|
|
47
|
+
* @since 1.0.0
|
|
48
|
+
*/
|
|
49
|
+
import * as Channel from "effect/Channel"
|
|
50
|
+
import * as Context from "effect/Context"
|
|
51
|
+
import * as Effect from "effect/Effect"
|
|
52
|
+
import type { ParseError } from "effect/ParseResult"
|
|
53
|
+
import * as Ref from "effect/Ref"
|
|
54
|
+
import * as Schema from "effect/Schema"
|
|
55
|
+
import * as Stream from "effect/Stream"
|
|
56
|
+
import type { NoExcessProperties } from "effect/Types"
|
|
57
|
+
import * as LanguageModel from "./LanguageModel.js"
|
|
58
|
+
import * as Prompt from "./Prompt.js"
|
|
59
|
+
import type * as Response from "./Response.js"
|
|
60
|
+
import type * as Tool from "./Tool.js"
|
|
61
|
+
|
|
62
|
+
/**
|
|
63
|
+
* The `Chat` service tag for dependency injection.
|
|
64
|
+
*
|
|
65
|
+
* This tag provides access to chat functionality throughout your application,
|
|
66
|
+
* enabling persistent conversational AI interactions with full context
|
|
67
|
+
* management.
|
|
68
|
+
*
|
|
69
|
+
* @example
|
|
70
|
+
* ```ts
|
|
71
|
+
* import { Chat } from "@effect/ai"
|
|
72
|
+
* import { Effect } from "effect"
|
|
73
|
+
*
|
|
74
|
+
* const useChat = Effect.gen(function* () {
|
|
75
|
+
* const chat = yield* Chat
|
|
76
|
+
* const response = yield* chat.generateText({
|
|
77
|
+
* prompt: "Explain quantum computing in simple terms"
|
|
78
|
+
* })
|
|
79
|
+
* return response.content
|
|
80
|
+
* })
|
|
81
|
+
* ```
|
|
82
|
+
*
|
|
83
|
+
* @since 1.0.0
|
|
84
|
+
* @category Context
|
|
85
|
+
*/
|
|
86
|
+
/**
 * The `Chat` service tag for dependency injection.
 *
 * Resolves to a {@link Service}: a stateful conversation handle whose
 * history, text/object generation, and streaming methods are described on
 * the interface itself.
 *
 * @since 1.0.0
 * @category Context
 */
export class Chat extends Context.Tag("@effect/ai/Chat")<
  Chat,
  Service
>() {}
|
|
90
|
+
|
|
91
|
+
/**
|
|
92
|
+
* Represents the interface that the `Chat` service provides.
|
|
93
|
+
*
|
|
94
|
+
* @since 1.0.0
|
|
95
|
+
* @category Models
|
|
96
|
+
*/
|
|
97
|
+
export interface Service {
  /**
   * Reference to the chat history.
   *
   * Direct access to the conversation state for advanced use cases such as
   * custom history manipulation or inspection (`Ref.get(chat.history)`).
   */
  readonly history: Ref.Ref<Prompt.Prompt>

  /**
   * Exports the chat history into a structured format.
   *
   * Produces the full conversation encoded via the `Prompt.Prompt` schema,
   * suitable for storage or transmission; restore it with `fromExport`.
   */
  readonly export: Effect.Effect<unknown>

  /**
   * Exports the chat history as a JSON string.
   *
   * Serializes the full conversation via the `Prompt.FromJson` schema;
   * restore it with `fromJson`.
   */
  readonly exportJson: Effect.Effect<string>

  /**
   * Generate text using a language model for the specified prompt.
   *
   * If a toolkit is provided in `options`, the model may perform tool calls.
   * Both the input prompt and the model's response are appended to the chat
   * history.
   */
  readonly generateText: <
    Options extends NoExcessProperties<LanguageModel.GenerateTextOptions<any>, Options>,
    Tools extends Record<string, Tool.Any> = {}
  >(options: Options & LanguageModel.GenerateTextOptions<Tools>) => Effect.Effect<
    LanguageModel.GenerateTextResponse<Tools>,
    LanguageModel.ExtractError<Options>,
    LanguageModel.ExtractContext<Options>
  >

  /**
   * Generate text using a language model with streaming output.
   *
   * Emits response parts as soon as they are produced by the model. Supports
   * tool calling; the accumulated response is appended to the chat history.
   */
  readonly streamText: <
    Options extends NoExcessProperties<LanguageModel.GenerateTextOptions<any>, Options>,
    Tools extends Record<string, Tool.Any> = {}
  >(options: Options & LanguageModel.GenerateTextOptions<Tools>) => Stream.Stream<
    Response.StreamPart<Tools>,
    LanguageModel.ExtractError<Options>,
    LanguageModel.ExtractContext<Options>
  >

  /**
   * Generate a structured object using a language model and schema.
   *
   * Constrains the model output to data conforming to `options.schema`,
   * enabling type-safe structured extraction. The conversation history is
   * maintained across calls.
   */
  readonly generateObject: <
    A,
    I extends Record<string, unknown>,
    R,
    Options extends NoExcessProperties<LanguageModel.GenerateObjectOptions<any, A, I, R>, Options>,
    Tools extends Record<string, Tool.Any> = {}
  >(options: Options & LanguageModel.GenerateObjectOptions<Tools, A, I, R>) => Effect.Effect<
    LanguageModel.GenerateObjectResponse<Tools, A>,
    LanguageModel.ExtractError<Options>,
    LanguageModel.LanguageModel | R | LanguageModel.ExtractContext<Options>
  >
}
|
|
286
|
+
|
|
287
|
+
/**
|
|
288
|
+
* Creates a new Chat service from an initial prompt.
|
|
289
|
+
*
|
|
290
|
+
* This is the primary constructor for creating chat instances. It initializes
|
|
291
|
+
* a new conversation with the provided prompt as the starting context.
|
|
292
|
+
*
|
|
293
|
+
* @example
|
|
294
|
+
* ```ts
|
|
295
|
+
* import { Chat, Prompt } from "@effect/ai"
|
|
296
|
+
* import { Effect } from "effect"
|
|
297
|
+
*
|
|
298
|
+
* const chatWithSystemPrompt = Effect.gen(function* () {
|
|
299
|
+
* const chat = yield* Chat.fromPrompt([{
|
|
300
|
+
* role: "system",
|
|
301
|
+
* content: "You are a helpful assistant specialized in mathematics."
|
|
302
|
+
* }])
|
|
303
|
+
*
|
|
304
|
+
* const response = yield* chat.generateText({
|
|
305
|
+
* prompt: "What is 2+2?"
|
|
306
|
+
* })
|
|
307
|
+
*
|
|
308
|
+
* return response.content
|
|
309
|
+
* })
|
|
310
|
+
* ```
|
|
311
|
+
*
|
|
312
|
+
* @example
|
|
313
|
+
* ```ts
|
|
314
|
+
* import { Chat, Prompt } from "@effect/ai"
|
|
315
|
+
* import { Effect } from "effect"
|
|
316
|
+
*
|
|
317
|
+
* // Initialize with conversation history
|
|
318
|
+
* const existingChat = Effect.gen(function* () {
|
|
319
|
+
* const chat = yield* Chat.fromPrompt([
|
|
320
|
+
* { role: "user", content: [{ type: "text", text: "What's the weather like?" }] },
|
|
321
|
+
* { role: "assistant", content: [{ type: "text", text: "I don't have access to weather data." }] },
|
|
322
|
+
* { role: "user", content: [{ type: "text", text: "Can you help me with coding?" }] }
|
|
323
|
+
* ])
|
|
324
|
+
*
|
|
325
|
+
* const response = yield* chat.generateText({
|
|
326
|
+
* prompt: "I need help with TypeScript"
|
|
327
|
+
* })
|
|
328
|
+
*
|
|
329
|
+
* return response
|
|
330
|
+
* })
|
|
331
|
+
* ```
|
|
332
|
+
*
|
|
333
|
+
* @since 1.0.0
|
|
334
|
+
* @category Constructors
|
|
335
|
+
*/
|
|
336
|
+
/**
 * Creates a new `Chat` service from an initial prompt.
 *
 * Primary constructor for chat instances: the provided prompt becomes the
 * starting conversation history. Requires a `LanguageModel` in context; the
 * ambient context is captured here so that later method calls run with it
 * merged in.
 *
 * All generate/stream methods are serialized through a one-permit semaphore
 * so concurrent calls cannot interleave their history updates.
 *
 * @since 1.0.0
 * @category Constructors
 */
export const fromPrompt = Effect.fnUntraced(function*(
  prompt: Prompt.RawInput
) {
  const languageModel = yield* LanguageModel.LanguageModel
  // Snapshot the construction-time context so method calls see it merged
  // underneath whatever context they are invoked with.
  const context = yield* Effect.context<never>()
  const provideContext = <A, E, R>(effect: Effect.Effect<A, E, R>): Effect.Effect<A, E, R> =>
    Effect.mapInputContext(effect, (input) => Context.merge(context, input))
  const provideContextStream = <A, E, R>(stream: Stream.Stream<A, E, R>): Stream.Stream<A, E, R> =>
    Stream.mapInputContext(stream, (input) => Context.merge(context, input))
  const history = yield* Ref.make<Prompt.Prompt>(Prompt.make(prompt))
  // Single permit: only one generate/stream call may touch `history` at a time.
  const semaphore = yield* Effect.makeSemaphore(1)

  return Chat.of({
    history,
    // Encode the current history with the Prompt schema; encoding failure is
    // treated as a defect (orDie) since history was produced by this module.
    export: Ref.get(history).pipe(
      Effect.flatMap(Schema.encode(Prompt.Prompt)),
      Effect.withSpan("Chat.export"),
      Effect.orDie
    ),
    exportJson: Ref.get(history).pipe(
      Effect.flatMap(Schema.encode(Prompt.FromJson)),
      Effect.withSpan("Chat.exportJson"),
      Effect.orDie
    ),
    generateText: Effect.fnUntraced(
      function*(options) {
        // Merge the caller's prompt onto the stored history before the call.
        const newPrompt = Prompt.make(options.prompt)
        const oldPrompt = yield* Ref.get(history)
        const prompt = Prompt.merge(oldPrompt, newPrompt)

        const response = yield* languageModel.generateText({ ...options, prompt })

        // Persist input + model output as the new history.
        const newHistory = Prompt.merge(prompt, Prompt.fromResponseParts(response.content))
        yield* Ref.set(history, newHistory)

        return response
      },
      provideContext,
      semaphore.withPermits(1),
      Effect.withSpan("Chat.generateText", { captureStackTrace: false })
    ),
    streamText: Effect.fnUntraced(
      function*(options) {
        // Accumulates the streamed response parts so the release step can
        // persist them into history once the stream ends.
        let combined: Prompt.Prompt = Prompt.empty
        return Stream.fromChannel(Channel.acquireUseRelease(
          // Acquire: take the permit, then snapshot history merged with the
          // caller's prompt. The merged prompt is the acquire result.
          semaphore.take(1).pipe(
            Effect.zipRight(Ref.get(history)),
            Effect.map((history) => Prompt.merge(history, Prompt.make(options.prompt)))
          ),
          (prompt) =>
            languageModel.streamText({ ...options, prompt }).pipe(
              // Fold each emitted chunk into `combined` while passing the
              // chunk through to the consumer unchanged.
              Stream.mapChunksEffect(Effect.fnUntraced(function*(chunk) {
                const parts = Array.from(chunk)
                combined = Prompt.merge(combined, Prompt.fromResponseParts(parts))
                return chunk
              })),
              Stream.toChannel
            ),
          // Release: persist whatever was streamed (even on early exit) and
          // free the permit. `parts` is the merged prompt from acquire.
          (parts) =>
            Effect.zipRight(
              Ref.set(history, Prompt.merge(parts, combined)),
              semaphore.release(1)
            )
        )).pipe(
          provideContextStream,
          Stream.withSpan("Chat.streamText", {
            captureStackTrace: false
          })
        )
      },
      Stream.unwrap
    ),
    generateObject: Effect.fnUntraced(
      function*(options) {
        const newPrompt = Prompt.make(options.prompt)
        const oldPrompt = yield* Ref.get(history)
        const prompt = Prompt.merge(oldPrompt, newPrompt)

        const response = yield* languageModel.generateObject({ ...options, prompt })

        const newHistory = Prompt.merge(prompt, Prompt.fromResponseParts(response.content))
        yield* Ref.set(history, newHistory)

        return response
      },
      provideContext,
      semaphore.withPermits(1),
      // Name the span after the explicit objectName, the schema's tag or
      // identifier when available, falling back to "generateObject".
      (effect, options) =>
        Effect.withSpan(effect, "Chat.generateObject", {
          attributes: {
            objectName: "objectName" in options
              ? options.objectName
              : "_tag" in options.schema
              ? options.schema._tag
              : (options.schema as any).identifier ?? "generateObject"
          },
          captureStackTrace: false
        })
    )
  })
})
|
|
437
|
+
|
|
438
|
+
/**
|
|
439
|
+
* Creates a new Chat service with empty conversation history.
|
|
440
|
+
*
|
|
441
|
+
* This is the most common way to start a fresh chat session without
|
|
442
|
+
* any initial context or system prompts.
|
|
443
|
+
*
|
|
444
|
+
* @example
|
|
445
|
+
* ```ts
|
|
446
|
+
* import { Chat } from "@effect/ai"
|
|
447
|
+
* import { Effect } from "effect"
|
|
448
|
+
*
|
|
449
|
+
* const freshChat = Effect.gen(function* () {
|
|
450
|
+
* const chat = yield* Chat.empty
|
|
451
|
+
*
|
|
452
|
+
* const response = yield* chat.generateText({
|
|
453
|
+
* prompt: "Hello! Can you introduce yourself?"
|
|
454
|
+
* })
|
|
455
|
+
*
|
|
456
|
+
* console.log(response.content)
|
|
457
|
+
*
|
|
458
|
+
* return chat
|
|
459
|
+
* })
|
|
460
|
+
* ```
|
|
461
|
+
*
|
|
462
|
+
* @since 1.0.0
|
|
463
|
+
* @category Constructors
|
|
464
|
+
*/
|
|
465
|
+
/**
 * Creates a new `Chat` service with empty conversation history — the common
 * way to start a fresh session with no initial context or system prompt.
 *
 * @since 1.0.0
 * @category Constructors
 */
export const empty: Effect.Effect<Service, never, LanguageModel.LanguageModel> = fromPrompt(Prompt.empty)
|
|
466
|
+
|
|
467
|
+
const decodeUnknown = Schema.decodeUnknown(Prompt.Prompt)
|
|
468
|
+
|
|
469
|
+
/**
|
|
470
|
+
* Creates a Chat service from previously exported chat data.
|
|
471
|
+
*
|
|
472
|
+
* Restores a chat session from structured data that was previously exported
|
|
473
|
+
* using the `export` method. Useful for persisting and restoring conversation
|
|
474
|
+
* state.
|
|
475
|
+
*
|
|
476
|
+
* @example
|
|
477
|
+
* ```ts
|
|
478
|
+
* import { Chat } from "@effect/ai"
|
|
479
|
+
* import { Effect } from "effect"
|
|
480
|
+
*
|
|
481
|
+
* declare const loadFromDatabase: (sessionId: string) => Effect.Effect<unknown>
|
|
482
|
+
*
|
|
483
|
+
* const restoreChat = Effect.gen(function* () {
|
|
484
|
+
* // Assume we have previously exported data
|
|
485
|
+
* const savedData = yield* loadFromDatabase("chat-session-123")
|
|
486
|
+
*
|
|
487
|
+
* const restoredChat = yield* Chat.fromExport(savedData)
|
|
488
|
+
*
|
|
489
|
+
* // Continue the conversation from where it left off
|
|
490
|
+
* const response = yield* restoredChat.generateText({
|
|
491
|
+
* prompt: "Let's continue our discussion"
|
|
492
|
+
* })
|
|
493
|
+
* }).pipe(
|
|
494
|
+
* Effect.catchTag("ParseError", (error) => {
|
|
495
|
+
* console.log("Failed to restore chat:", error.message)
|
|
496
|
+
* return Effect.void
|
|
497
|
+
* })
|
|
498
|
+
* )
|
|
499
|
+
* ```
|
|
500
|
+
*
|
|
501
|
+
* @since 1.0.0
|
|
502
|
+
* @category Constructors
|
|
503
|
+
*/
|
|
504
|
+
export const fromExport = (data: unknown): Effect.Effect<Service, ParseError, LanguageModel.LanguageModel> =>
|
|
505
|
+
Effect.flatMap(decodeUnknown(data), fromPrompt)
|
|
506
|
+
|
|
507
|
+
const decodeJson = Schema.decode(Prompt.FromJson)
|
|
508
|
+
|
|
509
|
+
/**
|
|
510
|
+
* Creates a Chat service from previously exported JSON chat data.
|
|
511
|
+
*
|
|
512
|
+
* Restores a chat session from JSON string that was previously exported
|
|
513
|
+
* using the `exportJson` method. This is the most convenient way to
|
|
514
|
+
* persist and restore chat sessions to/from storage systems.
|
|
515
|
+
*
|
|
516
|
+
* @example
|
|
517
|
+
* ```ts
|
|
518
|
+
* import { Chat } from "@effect/ai"
|
|
519
|
+
* import { Effect } from "effect"
|
|
520
|
+
*
|
|
521
|
+
* const restoreFromJson = Effect.gen(function* () {
|
|
522
|
+
* // Load JSON from localStorage or file system
|
|
523
|
+
* const jsonData = localStorage.getItem("my-chat-backup")
|
|
524
|
+
* if (!jsonData) return yield* Chat.empty
|
|
525
|
+
*
|
|
526
|
+
* const restoredChat = yield* Chat.fromJson(jsonData)
|
|
527
|
+
*
|
|
528
|
+
* // Chat history is now restored
|
|
529
|
+
* const response = yield* restoredChat.generateText({
|
|
530
|
+
* prompt: "What were we talking about?"
|
|
531
|
+
* })
|
|
532
|
+
*
|
|
533
|
+
* return response
|
|
534
|
+
* }).pipe(
|
|
535
|
+
* Effect.catchTag("ParseError", (error) => {
|
|
536
|
+
* console.log("Invalid JSON format:", error.message)
|
|
537
|
+
* return Chat.empty // Fallback to empty chat
|
|
538
|
+
* })
|
|
539
|
+
* )
|
|
540
|
+
* ```
|
|
541
|
+
*
|
|
542
|
+
* @since 1.0.0
|
|
543
|
+
* @category Constructors
|
|
544
|
+
*/
|
|
545
|
+
export const fromJson = (data: string): Effect.Effect<Service, ParseError, LanguageModel.LanguageModel> =>
|
|
546
|
+
Effect.flatMap(decodeJson(data), fromPrompt)
|