@effect/ai 0.28.0 → 0.28.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/Chat.ts CHANGED
@@ -46,14 +46,19 @@
46
46
  *
47
47
  * @since 1.0.0
48
48
  */
49
+ import type { PersistenceBackingError } from "@effect/experimental/Persistence"
50
+ import { BackingPersistence } from "@effect/experimental/Persistence"
49
51
  import * as Channel from "effect/Channel"
50
52
  import * as Context from "effect/Context"
51
53
  import * as Effect from "effect/Effect"
54
+ import * as Layer from "effect/Layer"
55
+ import * as Option from "effect/Option"
52
56
  import type { ParseError } from "effect/ParseResult"
53
57
  import * as Ref from "effect/Ref"
54
58
  import * as Schema from "effect/Schema"
55
59
  import * as Stream from "effect/Stream"
56
60
  import type { NoExcessProperties } from "effect/Types"
61
+ import * as AiError from "./AiError.js"
57
62
  import * as LanguageModel from "./LanguageModel.js"
58
63
  import * as Prompt from "./Prompt.js"
59
64
  import type * as Response from "./Response.js"
@@ -138,7 +143,7 @@ export interface Service {
138
143
  * })
139
144
  * ```
140
145
  */
141
- readonly export: Effect.Effect<unknown>
146
+ readonly export: Effect.Effect<unknown, AiError.AiError>
142
147
 
143
148
  /**
144
149
  * Exports the chat history as a JSON string.
@@ -165,7 +170,7 @@ export interface Service {
165
170
  * })
166
171
  * ```
167
172
  */
168
- readonly exportJson: Effect.Effect<string>
173
+ readonly exportJson: Effect.Effect<string, AiError.MalformedOutput>
169
174
 
170
175
  /**
171
176
  * Generate text using a language model for the specified prompt.
@@ -200,7 +205,7 @@ export interface Service {
200
205
  >(options: Options & LanguageModel.GenerateTextOptions<Tools>) => Effect.Effect<
201
206
  LanguageModel.GenerateTextResponse<Tools>,
202
207
  LanguageModel.ExtractError<Options>,
203
- LanguageModel.ExtractContext<Options>
208
+ LanguageModel.LanguageModel | LanguageModel.ExtractContext<Options>
204
209
  >
205
210
 
206
211
  /**
@@ -235,7 +240,7 @@ export interface Service {
235
240
  >(options: Options & LanguageModel.GenerateTextOptions<Tools>) => Stream.Stream<
236
241
  Response.StreamPart<Tools>,
237
242
  LanguageModel.ExtractError<Options>,
238
- LanguageModel.ExtractContext<Options>
243
+ LanguageModel.LanguageModel | LanguageModel.ExtractContext<Options>
239
244
  >
240
245
 
241
246
  /**
@@ -284,78 +289,73 @@ export interface Service {
284
289
  >
285
290
  }
286
291
 
292
+ // =============================================================================
293
+ // Constructors
294
+ // =============================================================================
295
+
287
296
  /**
288
- * Creates a new Chat service from an initial prompt.
297
+ * Creates a new Chat service with empty conversation history.
289
298
  *
290
- * This is the primary constructor for creating chat instances. It initializes
291
- * a new conversation with the provided prompt as the starting context.
299
+ * This is the most common way to start a fresh chat session without
300
+ * any initial context or system prompts.
292
301
  *
293
302
  * @example
294
303
  * ```ts
295
- * import { Chat, Prompt } from "@effect/ai"
304
+ * import { Chat } from "@effect/ai"
296
305
  * import { Effect } from "effect"
297
306
  *
298
- * const chatWithSystemPrompt = Effect.gen(function* () {
299
- * const chat = yield* Chat.fromPrompt([{
300
- * role: "system",
301
- * content: "You are a helpful assistant specialized in mathematics."
302
- * }])
307
+ * const freshChat = Effect.gen(function* () {
308
+ * const chat = yield* Chat.empty
303
309
  *
304
310
  * const response = yield* chat.generateText({
305
- * prompt: "What is 2+2?"
311
+ * prompt: "Hello! Can you introduce yourself?"
306
312
  * })
307
313
  *
308
- * return response.content
309
- * })
310
- * ```
311
- *
312
- * @example
313
- * ```ts
314
- * import { Chat, Prompt } from "@effect/ai"
315
- * import { Effect } from "effect"
316
- *
317
- * // Initialize with conversation history
318
- * const existingChat = Effect.gen(function* () {
319
- * const chat = yield* Chat.fromPrompt([
320
- * { role: "user", content: [{ type: "text", text: "What's the weather like?" }] },
321
- * { role: "assistant", content: [{ type: "text", text: "I don't have access to weather data." }] },
322
- * { role: "user", content: [{ type: "text", text: "Can you help me with coding?" }] }
323
- * ])
324
- *
325
- * const response = yield* chat.generateText({
326
- * prompt: "I need help with TypeScript"
327
- * })
314
+ * console.log(response.content)
328
315
  *
329
- * return response
316
+ * return chat
330
317
  * })
331
318
  * ```
332
319
  *
333
320
  * @since 1.0.0
334
321
  * @category Constructors
335
322
  */
336
- export const fromPrompt = Effect.fnUntraced(function*(
337
- prompt: Prompt.RawInput
338
- ) {
339
- const languageModel = yield* LanguageModel.LanguageModel
323
+ export const empty: Effect.Effect<Service> = Effect.gen(function*() {
324
+ const history = yield* Ref.make(Prompt.empty)
340
325
  const context = yield* Effect.context<never>()
326
+ const semaphore = yield* Effect.makeSemaphore(1)
327
+
341
328
  const provideContext = <A, E, R>(effect: Effect.Effect<A, E, R>): Effect.Effect<A, E, R> =>
342
329
  Effect.mapInputContext(effect, (input) => Context.merge(context, input))
343
330
  const provideContextStream = <A, E, R>(stream: Stream.Stream<A, E, R>): Stream.Stream<A, E, R> =>
344
331
  Stream.mapInputContext(stream, (input) => Context.merge(context, input))
345
- const history = yield* Ref.make<Prompt.Prompt>(Prompt.make(prompt))
346
- const semaphore = yield* Effect.makeSemaphore(1)
332
+
333
+ const encodeHistory = Schema.encode(Prompt.Prompt)
334
+ const encodeHistoryJson = Schema.encode(Prompt.FromJson)
347
335
 
348
336
  return Chat.of({
349
337
  history,
350
338
  export: Ref.get(history).pipe(
351
- Effect.flatMap(Schema.encode(Prompt.Prompt)),
352
- Effect.withSpan("Chat.export"),
353
- Effect.orDie
339
+ Effect.flatMap(encodeHistory),
340
+ Effect.catchTag("ParseError", (error) =>
341
+ AiError.MalformedOutput.fromParseError({
342
+ module: "Chat",
343
+ method: "exportJson",
344
+ description: "Failed to encode chat history",
345
+ error
346
+ })),
347
+ Effect.withSpan("Chat.export")
354
348
  ),
355
349
  exportJson: Ref.get(history).pipe(
356
- Effect.flatMap(Schema.encode(Prompt.FromJson)),
357
- Effect.withSpan("Chat.exportJson"),
358
- Effect.orDie
350
+ Effect.flatMap(encodeHistoryJson),
351
+ Effect.catchTag("ParseError", (error) =>
352
+ AiError.MalformedOutput.fromParseError({
353
+ module: "Chat",
354
+ method: "exportJson",
355
+ description: "Failed to encode chat history into JSON",
356
+ error
357
+ })),
358
+ Effect.withSpan("Chat.exportJson")
359
359
  ),
360
360
  generateText: Effect.fnUntraced(
361
361
  function*(options) {
@@ -363,7 +363,7 @@ export const fromPrompt = Effect.fnUntraced(function*(
363
363
  const oldPrompt = yield* Ref.get(history)
364
364
  const prompt = Prompt.merge(oldPrompt, newPrompt)
365
365
 
366
- const response = yield* languageModel.generateText({ ...options, prompt })
366
+ const response = yield* LanguageModel.generateText({ ...options, prompt })
367
367
 
368
368
  const newHistory = Prompt.merge(prompt, Prompt.fromResponseParts(response.content))
369
369
  yield* Ref.set(history, newHistory)
@@ -383,7 +383,7 @@ export const fromPrompt = Effect.fnUntraced(function*(
383
383
  Effect.map((history) => Prompt.merge(history, Prompt.make(options.prompt)))
384
384
  ),
385
385
  (prompt) =>
386
- languageModel.streamText({ ...options, prompt }).pipe(
386
+ LanguageModel.streamText({ ...options, prompt }).pipe(
387
387
  Stream.mapChunksEffect(Effect.fnUntraced(function*(chunk) {
388
388
  const parts = Array.from(chunk)
389
389
  combined = Prompt.merge(combined, Prompt.fromResponseParts(parts))
@@ -411,7 +411,7 @@ export const fromPrompt = Effect.fnUntraced(function*(
411
411
  const oldPrompt = yield* Ref.get(history)
412
412
  const prompt = Prompt.merge(oldPrompt, newPrompt)
413
413
 
414
- const response = yield* languageModel.generateObject({ ...options, prompt })
414
+ const response = yield* LanguageModel.generateObject({ ...options, prompt })
415
415
 
416
416
  const newHistory = Prompt.merge(prompt, Prompt.fromResponseParts(response.content))
417
417
  yield* Ref.set(history, newHistory)
@@ -436,33 +436,61 @@ export const fromPrompt = Effect.fnUntraced(function*(
436
436
  })
437
437
 
438
438
  /**
439
- * Creates a new Chat service with empty conversation history.
439
+ * Creates a new Chat service from an initial prompt.
440
440
  *
441
- * This is the most common way to start a fresh chat session without
442
- * any initial context or system prompts.
441
+ * This is the primary constructor for creating chat instances. It initializes
442
+ * a new conversation with the provided prompt as the starting context.
443
443
  *
444
444
  * @example
445
445
  * ```ts
446
- * import { Chat } from "@effect/ai"
446
+ * import { Chat, Prompt } from "@effect/ai"
447
447
  * import { Effect } from "effect"
448
448
  *
449
- * const freshChat = Effect.gen(function* () {
450
- * const chat = yield* Chat.empty
449
+ * const chatWithSystemPrompt = Effect.gen(function* () {
450
+ * const chat = yield* Chat.fromPrompt([{
451
+ * role: "system",
452
+ * content: "You are a helpful assistant specialized in mathematics."
453
+ * }])
451
454
  *
452
455
  * const response = yield* chat.generateText({
453
- * prompt: "Hello! Can you introduce yourself?"
456
+ * prompt: "What is 2+2?"
454
457
  * })
455
458
  *
456
- * console.log(response.content)
459
+ * return response.content
460
+ * })
461
+ * ```
457
462
  *
458
- * return chat
463
+ * @example
464
+ * ```ts
465
+ * import { Chat, Prompt } from "@effect/ai"
466
+ * import { Effect } from "effect"
467
+ *
468
+ * // Initialize with conversation history
469
+ * const existingChat = Effect.gen(function* () {
470
+ * const chat = yield* Chat.fromPrompt([
471
+ * { role: "user", content: [{ type: "text", text: "What's the weather like?" }] },
472
+ * { role: "assistant", content: [{ type: "text", text: "I don't have access to weather data." }] },
473
+ * { role: "user", content: [{ type: "text", text: "Can you help me with coding?" }] }
474
+ * ])
475
+ *
476
+ * const response = yield* chat.generateText({
477
+ * prompt: "I need help with TypeScript"
478
+ * })
479
+ *
480
+ * return response
459
481
  * })
460
482
  * ```
461
483
  *
462
484
  * @since 1.0.0
463
485
  * @category Constructors
464
486
  */
465
- export const empty: Effect.Effect<Service, never, LanguageModel.LanguageModel> = fromPrompt(Prompt.empty)
487
+ export const fromPrompt = Effect.fnUntraced(
488
+ function*(prompt: Prompt.RawInput) {
489
+ const chat = yield* empty
490
+ yield* Ref.set(chat.history, Prompt.make(prompt))
491
+ return chat
492
+ }
493
+ )
466
494
 
467
495
  const decodeUnknown = Schema.decodeUnknown(Prompt.Prompt)
468
496
 
@@ -504,7 +532,7 @@ const decodeUnknown = Schema.decodeUnknown(Prompt.Prompt)
504
532
  export const fromExport = (data: unknown): Effect.Effect<Service, ParseError, LanguageModel.LanguageModel> =>
505
533
  Effect.flatMap(decodeUnknown(data), fromPrompt)
506
534
 
507
- const decodeJson = Schema.decode(Prompt.FromJson)
535
+ const decodeHistoryJson = Schema.decodeUnknown(Prompt.FromJson)
508
536
 
509
537
  /**
510
538
  * Creates a Chat service from previously exported JSON chat data.
@@ -543,4 +571,200 @@ const decodeJson = Schema.decode(Prompt.FromJson)
543
571
  * @category Constructors
544
572
  */
545
573
  export const fromJson = (data: string): Effect.Effect<Service, ParseError, LanguageModel.LanguageModel> =>
546
- Effect.flatMap(decodeJson(data), fromPrompt)
574
+ Effect.flatMap(decodeHistoryJson(data), fromPrompt)
575
+
576
+ // =============================================================================
577
+ // Chat Persistence
578
+ // =============================================================================
579
+
580
+ /**
581
+ * An error that occurs when attempting to retrieve a persisted `Chat` that
582
+ * does not exist in the backing persistence store.
583
+ *
584
+ * @since 1.0.0
585
+ * @category Errors
586
+ */
587
+ export class ChatNotFoundError extends Schema.TaggedError<ChatNotFoundError>(
588
+ "@effect/ai/Chat/ChatNotFoundError"
589
+ )("ChatNotFoundError", { chatId: Schema.String }) {}
590
+
591
+ // @effect-diagnostics effect/leakingRequirements:off
592
+ /**
593
+ * The context tag for chat persistence.
594
+ *
595
+ * @since 1.0.0
596
+ * @category Context
597
+ */
598
+ export class Persistence extends Context.Tag("@effect/ai/Chat/Persisted")<
599
+ Persistence,
600
+ Persistence.Service
601
+ >() {}
602
+
603
+ /**
604
+ * @since 1.0.0
605
+ * @category Models
606
+ */
607
+ export declare namespace Persistence {
608
+ /**
609
+ * Represents the backing persistence for a persisted `Chat`. Allows for
610
+ * creating and retrieving chats that have been saved to a persistence store.
611
+ *
612
+ * @since 1.0.0
613
+ * @category Models
614
+ */
615
+ export interface Service {
616
+ /**
617
+ * Attempts to retrieve the persisted chat from the backing persistence
618
+ * store with the specified chat identifier. If the chat does not exist in
619
+ * the persistence store, a `ChatNotFoundError` will be returned.
620
+ */
621
+ readonly get: (chatId: string) => Effect.Effect<
622
+ Persisted,
623
+ ChatNotFoundError | PersistenceBackingError
624
+ >
625
+
626
+ /**
627
+ * Attempts to retrieve the persisted chat from the backing persistence
628
+ * store with the specified chat identifier. If the chat does not exist in
629
+ * the persistence store, an empty chat will be created, saved, and
630
+ * returned.
631
+ */
632
+ readonly getOrCreate: (chatId: string) => Effect.Effect<
633
+ Persisted,
634
+ AiError.MalformedOutput | PersistenceBackingError
635
+ >
636
+ }
637
+ }
638
+
639
+ /**
640
+ * Represents a `Chat` that is backed by persistence.
641
+ *
642
+ * When calling a text generation method (e.g. `generateText`), the previous
643
+ * chat history as well as the relevant response parts will be saved to the
644
+ * backing persistence store.
645
+ *
646
+ * @since 1.0.0
647
+ * @category Models
648
+ */
649
+ export interface Persisted extends Service {
650
+ /**
651
+ * The identifier for the chat in the backing persistence store.
652
+ */
653
+ readonly id: string
654
+ }
655
+
656
+ /**
657
+ * Creates a new chat persistence service.
658
+ *
659
+ * The provided store identifier will be used to indicate which "store" the
660
+ * backing persistence should load chats from.
661
+ *
662
+ * @since 1.0.0
663
+ * @category Constructors
664
+ */
665
+ export const makePersisted = Effect.fnUntraced(function*(options: {
666
+ readonly storeId: string
667
+ }) {
668
+ const persistence = yield* BackingPersistence
669
+ const store = yield* persistence.make(options.storeId)
670
+
671
+ const toPersisted = (chatId: string, chat: Service): Persisted => {
672
+ const persistChat = chat.exportJson.pipe(
673
+ Effect.flatMap((history) => store.set(chatId, history, Option.none())),
674
+ Effect.orDie
675
+ )
676
+ return {
677
+ ...chat,
678
+ id: chatId,
679
+ generateText: (options) =>
680
+ chat.generateText(options).pipe(
681
+ Effect.ensuring(persistChat)
682
+ ),
683
+ generateObject: (options) =>
684
+ chat.generateObject(options).pipe(
685
+ Effect.ensuring(persistChat)
686
+ ),
687
+ streamText: (options) =>
688
+ chat.streamText(options).pipe(
689
+ Stream.ensuring(persistChat)
690
+ )
691
+ }
692
+ }
693
+
694
+ const createChat = Effect.fnUntraced(
695
+ function*(chatId: string) {
696
+ // Create an empty chat
697
+ const chat = yield* empty
698
+
699
+ // Save the history for the newly created chat
700
+ const history = yield* chat.exportJson
701
+ yield* store.set(chatId, history, Option.none())
702
+
703
+ // Convert the chat to a persisted chat
704
+ return toPersisted(chatId, chat)
705
+ },
706
+ Effect.catchTag("PersistenceError", (error) => {
707
+ // Should never happen because we are using the backing persistence
708
+ // store directly, and parse errors can only occur when using result
709
+ // persistence
710
+ if (error.reason === "ParseError") return Effect.die(error)
711
+ return Effect.fail(error)
712
+ })
713
+ )
714
+
715
+ const getChat = Effect.fnUntraced(
716
+ function*(chatId: string) {
717
+ // Create an empty chat
718
+ const chat = yield* empty
719
+
720
+ // Hydrate the chat history
721
+ yield* store.get(chatId).pipe(
722
+ Effect.flatMap(Effect.transposeMapOption(decodeHistoryJson)),
723
+ Effect.flatten,
724
+ Effect.catchTag("NoSuchElementException", () => new ChatNotFoundError({ chatId })),
725
+ Effect.flatMap((history) => Ref.set(chat.history, history))
726
+ )
727
+
728
+ // Convert the chat to a persisted chat
729
+ return toPersisted(chatId, chat)
730
+ },
731
+ Effect.catchTags({
732
+ ParseError: (error) => Effect.die(error),
733
+ PersistenceError: (error) => {
734
+ // Should never happen because we are using the backing persistence
735
+ // store directly, and parse errors can only occur when using result
736
+ // persistence
737
+ if (error.reason === "ParseError") return Effect.die(error)
738
+ return Effect.fail(error)
739
+ }
740
+ })
741
+ )
742
+
743
+ const get = Effect.fn("PersistedChat.get")(function*(chatId: string) {
744
+ return yield* getChat(chatId)
745
+ })
746
+
747
+ const getOrCreate = Effect.fn("PersistedChat.getOrCreate")(function*(chatId: string) {
748
+ return yield* getChat(chatId).pipe(
749
+ Effect.catchTag("ChatNotFoundError", () => createChat(chatId))
750
+ )
751
+ })
752
+
753
+ return Persistence.of({
754
+ get,
755
+ getOrCreate
756
+ })
757
+ })
758
+
759
+ /**
760
+ * Creates a `Layer` containing a new chat persistence service.
761
+ *
762
+ * The provided store identifier will be used to indicate which "store" the
763
+ * backing persistence should load chats from.
764
+ *
765
+ * @since 1.0.0
766
+ * @category Constructors
767
+ */
768
+ export const layerPersisted = (options: {
769
+ readonly storeId: string
770
+ }): Layer.Layer<Persistence, never, BackingPersistence> => Layer.scoped(Persistence, makePersisted(options))