@effect/ai-anthropic 0.4.1 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/AnthropicLanguageModel/package.json +6 -0
  2. package/dist/cjs/AnthropicClient.js +128 -96
  3. package/dist/cjs/AnthropicClient.js.map +1 -1
  4. package/dist/cjs/AnthropicConfig.js +2 -2
  5. package/dist/cjs/AnthropicConfig.js.map +1 -1
  6. package/dist/cjs/AnthropicLanguageModel.js +496 -0
  7. package/dist/cjs/AnthropicLanguageModel.js.map +1 -0
  8. package/dist/cjs/AnthropicTokenizer.js +7 -10
  9. package/dist/cjs/AnthropicTokenizer.js.map +1 -1
  10. package/dist/cjs/index.js +3 -3
  11. package/dist/cjs/internal/utilities.js +24 -0
  12. package/dist/cjs/internal/utilities.js.map +1 -0
  13. package/dist/dts/AnthropicClient.d.ts +9 -64
  14. package/dist/dts/AnthropicClient.d.ts.map +1 -1
  15. package/dist/dts/AnthropicConfig.d.ts +5 -6
  16. package/dist/dts/AnthropicConfig.d.ts.map +1 -1
  17. package/dist/dts/{AnthropicCompletions.d.ts → AnthropicLanguageModel.d.ts} +30 -22
  18. package/dist/dts/AnthropicLanguageModel.d.ts.map +1 -0
  19. package/dist/dts/AnthropicTokenizer.d.ts +2 -2
  20. package/dist/dts/AnthropicTokenizer.d.ts.map +1 -1
  21. package/dist/dts/index.d.ts +2 -2
  22. package/dist/dts/index.d.ts.map +1 -1
  23. package/dist/dts/internal/utilities.d.ts +2 -0
  24. package/dist/dts/internal/utilities.d.ts.map +1 -0
  25. package/dist/esm/AnthropicClient.js +127 -94
  26. package/dist/esm/AnthropicClient.js.map +1 -1
  27. package/dist/esm/AnthropicConfig.js +2 -2
  28. package/dist/esm/AnthropicConfig.js.map +1 -1
  29. package/dist/esm/AnthropicLanguageModel.js +484 -0
  30. package/dist/esm/AnthropicLanguageModel.js.map +1 -0
  31. package/dist/esm/AnthropicTokenizer.js +7 -10
  32. package/dist/esm/AnthropicTokenizer.js.map +1 -1
  33. package/dist/esm/index.js +2 -2
  34. package/dist/esm/index.js.map +1 -1
  35. package/dist/esm/internal/utilities.js +15 -0
  36. package/dist/esm/internal/utilities.js.map +1 -0
  37. package/package.json +10 -10
  38. package/src/AnthropicClient.ts +197 -162
  39. package/src/AnthropicConfig.ts +7 -8
  40. package/src/AnthropicLanguageModel.ts +591 -0
  41. package/src/AnthropicTokenizer.ts +31 -19
  42. package/src/index.ts +2 -2
  43. package/src/internal/utilities.ts +18 -0
  44. package/AnthropicCompletions/package.json +0 -6
  45. package/dist/cjs/AnthropicCompletions.js +0 -358
  46. package/dist/cjs/AnthropicCompletions.js.map +0 -1
  47. package/dist/dts/AnthropicCompletions.d.ts.map +0 -1
  48. package/dist/esm/AnthropicCompletions.js +0 -345
  49. package/dist/esm/AnthropicCompletions.js.map +0 -1
  50. package/src/AnthropicCompletions.ts +0 -468
@@ -1,468 +0,0 @@
1
- /**
2
- * @since 1.0.0
3
- */
4
- import { AiError } from "@effect/ai/AiError"
5
- import type * as AiInput from "@effect/ai/AiInput"
6
- import * as AiModel from "@effect/ai/AiModel"
7
- import * as AiResponse from "@effect/ai/AiResponse"
8
- import * as AiRole from "@effect/ai/AiRole"
9
- import { addGenAIAnnotations } from "@effect/ai/AiTelemetry"
10
- import * as Completions from "@effect/ai/Completions"
11
- import * as Tokenizer from "@effect/ai/Tokenizer"
12
- import * as Arr from "effect/Array"
13
- import * as Chunk from "effect/Chunk"
14
- import * as Context from "effect/Context"
15
- import * as Effect from "effect/Effect"
16
- import { dual } from "effect/Function"
17
- import * as Layer from "effect/Layer"
18
- import * as Option from "effect/Option"
19
- import * as Predicate from "effect/Predicate"
20
- import * as Stream from "effect/Stream"
21
- import type { Span } from "effect/Tracer"
22
- import type { Simplify } from "effect/Types"
23
- import type { StreamChunk } from "./AnthropicClient.js"
24
- import { AnthropicClient } from "./AnthropicClient.js"
25
- import * as AnthropicTokenizer from "./AnthropicTokenizer.js"
26
- import type * as Generated from "./Generated.js"
27
-
28
- /**
29
- * @since 1.0.0
30
- * @category models
31
- */
32
- export type Model = typeof Generated.Model.Encoded
33
-
34
- // =============================================================================
35
- // Configuration
36
- // =============================================================================
37
-
38
- /**
39
- * @since 1.0.0
40
- * @category tags
41
- */
42
- export class Config extends Context.Tag("@effect/ai-anthropic/AnthropicCompletions/Config")<
43
- Config,
44
- Config.Service
45
- >() {
46
- /**
47
- * @since 1.0.0
48
- */
49
- static readonly getOrUndefined: Effect.Effect<typeof Config.Service | undefined> = Effect.map(
50
- Effect.context<never>(),
51
- (context) => context.unsafeMap.get(Config.key)
52
- )
53
- }
54
-
55
- /**
56
- * @since 1.0.0
57
- */
58
- export declare namespace Config {
59
- /**
60
- * @since 1.0.0
61
- * @category configuration
62
- */
63
- export interface Service extends
64
- Simplify<
65
- Partial<
66
- Omit<
67
- typeof Generated.CreateMessageParams.Encoded,
68
- "messages" | "tools" | "tool_choice" | "stream"
69
- >
70
- >
71
- >
72
- {}
73
- }
74
-
75
- // =============================================================================
76
- // Anthropic Completions
77
- // =============================================================================
78
-
79
- const modelCacheKey = Symbol.for("@effect/ai-anthropic/AnthropicCompletions/AiModel")
80
-
81
- /**
82
- * @since 1.0.0
83
- * @category ai models
84
- */
85
- export const model = (
86
- model: (string & {}) | Model,
87
- config?: Omit<Config.Service, "model">
88
- ): AiModel.AiModel<Completions.Completions | Tokenizer.Tokenizer, AnthropicClient> =>
89
- AiModel.make({
90
- model,
91
- cacheKey: modelCacheKey,
92
- requires: AnthropicClient,
93
- provides: make({ model, config }).pipe(
94
- Effect.map((completions) =>
95
- Context.merge(
96
- Context.make(Completions.Completions, completions),
97
- Context.make(Tokenizer.Tokenizer, AnthropicTokenizer.make)
98
- )
99
- )
100
- ),
101
- updateContext: (context) => {
102
- const innerConfig = context.unsafeMap.get(Config.key) as Config.Service | undefined
103
- return Context.merge(context, Context.make(Config, { model, ...config, ...innerConfig }))
104
- }
105
- })
106
-
107
- const make = Effect.fnUntraced(function*(options: {
108
- readonly model: (string & {}) | Model
109
- readonly config?: Omit<Config.Service, "model">
110
- }) {
111
- const client = yield* AnthropicClient
112
-
113
- const makeRequest = ({ input, required, system, tools }: Completions.CompletionOptions) => {
114
- const useStructured = tools.length === 1 && tools[0].structured
115
- return Effect.map(
116
- Effect.context<never>(),
117
- (context): typeof Generated.CreateMessageParams.Encoded => ({
118
- model: options.model,
119
- // TODO: re-evaluate a better way to do this
120
- max_tokens: 4096,
121
- ...options.config,
122
- ...context.unsafeMap.get(Config.key),
123
- system: Option.getOrUndefined(system),
124
- messages: makeMessages(input),
125
- tools: tools.length === 0 ? undefined : tools.map((tool) => ({
126
- name: tool.name,
127
- description: tool.description,
128
- input_schema: tool.parameters as any
129
- })),
130
- tool_choice: !useStructured && tools.length > 0
131
- // For non-structured outputs, ensure tools are used if required
132
- ? typeof required === "boolean"
133
- ? required ? { type: "any" } : { type: "auto" }
134
- : { type: "tool", name: required }
135
- // For structured outputs, ensure the json output tool is used
136
- : useStructured
137
- ? { type: "tool", name: tools[0].name }
138
- : undefined
139
- })
140
- )
141
- }
142
-
143
- return yield* Completions.make({
144
- create({ span, ...options }) {
145
- return makeRequest(options).pipe(
146
- Effect.tap((request) => annotateRequest(span, request)),
147
- Effect.flatMap((payload) => client.client.messagesPost({ params: {}, payload })),
148
- Effect.tap((response) => annotateChatResponse(span, response)),
149
- Effect.flatMap((response) =>
150
- makeResponse(
151
- response,
152
- "create",
153
- options.tools.length === 1 && options.tools[0].structured
154
- ? options.tools[0]
155
- : undefined
156
- )
157
- ),
158
- Effect.catchAll((cause) =>
159
- Effect.fail(
160
- new AiError({
161
- module: "AnthropicCompletions",
162
- method: "create",
163
- description: "An error occurred",
164
- cause
165
- })
166
- )
167
- )
168
- )
169
- },
170
- stream({ span, ...options }) {
171
- return makeRequest(options).pipe(
172
- Effect.tap((request) => annotateRequest(span, request)),
173
- Effect.map(client.stream),
174
- Stream.unwrap,
175
- Stream.tap((response) => {
176
- annotateStreamResponse(span, response)
177
- return Effect.void
178
- }),
179
- Stream.map((response) => response.asAiResponse),
180
- Stream.catchAll((cause) =>
181
- Effect.fail(
182
- new AiError({
183
- module: "AnthropicCompletions",
184
- method: "stream",
185
- description: "An error occurred",
186
- cause
187
- })
188
- )
189
- )
190
- )
191
- }
192
- })
193
- })
194
-
195
- /**
196
- * @since 1.0.0
197
- * @category layers
198
- */
199
- export const layerCompletions = (options: {
200
- readonly model: (string & {}) | Model
201
- readonly config?: Omit<Config.Service, "model">
202
- }): Layer.Layer<Completions.Completions, never, AnthropicClient> =>
203
- Layer.effect(
204
- Completions.Completions,
205
- make({ model: options.model, config: options.config })
206
- )
207
-
208
- /**
209
- * @since 1.0.0
210
- * @category layers
211
- */
212
- export const layer = (options: {
213
- readonly model: (string & {}) | Model
214
- readonly config?: Omit<Config.Service, "model">
215
- }): Layer.Layer<Completions.Completions | Tokenizer.Tokenizer, never, AnthropicClient> =>
216
- Layer.merge(layerCompletions(options), AnthropicTokenizer.layer)
217
-
218
- /**
219
- * @since 1.0.0
220
- * @category configuration
221
- */
222
- export const withConfigOverride: {
223
- /**
224
- * @since 1.0.0
225
- * @category configuration
226
- */
227
- (config: Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>
228
- /**
229
- * @since 1.0.0
230
- * @category configuration
231
- */
232
- <A, E, R>(self: Effect.Effect<A, E, R>, config: Config.Service): Effect.Effect<A, E, R>
233
- } = dual<
234
- /**
235
- * @since 1.0.0
236
- * @category configuration
237
- */
238
- (config: Config.Service) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>,
239
- /**
240
- * @since 1.0.0
241
- * @category configuration
242
- */
243
- <A, E, R>(self: Effect.Effect<A, E, R>, config: Config.Service) => Effect.Effect<A, E, R>
244
- >(2, (self, overrides) =>
245
- Effect.flatMap(
246
- Config.getOrUndefined,
247
- (config) => Effect.provideService(self, Config, { ...config, ...overrides })
248
- ))
249
-
250
- const makeMessages = (
251
- aiInput: AiInput.AiInput
252
- ): Arr.NonEmptyReadonlyArray<typeof Generated.InputMessage.Encoded> => {
253
- const messages: Array<typeof Generated.InputMessage.Encoded> = []
254
- for (const input of aiInput) {
255
- for (const message of convertMessage(input)) {
256
- messages.push(message)
257
- }
258
- }
259
- return messages as any
260
- }
261
-
262
- const formatRole = (role: AiRole.AiRole) => {
263
- switch (role._tag) {
264
- case "UserWithName":
265
- return {
266
- role: "user",
267
- name: safeName(role.name)
268
- } as const
269
- case "User":
270
- return {
271
- role: "user"
272
- } as const
273
- case "Model":
274
- return {
275
- role: "assistant"
276
- } as const
277
- }
278
- }
279
-
280
- const convertMessage = (
281
- message: AiInput.Message
282
- ): Array<typeof Generated.InputMessage.Encoded> => {
283
- const messages: Array<typeof Generated.InputMessage.Encoded> = []
284
- let parts: Array<typeof Generated.InputContentBlock.Encoded> = []
285
- let toolCalls: Array<typeof Generated.RequestToolUseBlock.Encoded> = []
286
- function flushContent() {
287
- if (parts.length === 0) return
288
- messages.push({
289
- ...formatRole(message.role),
290
- content: parts
291
- })
292
- parts = []
293
- }
294
- function flushToolCalls() {
295
- if (toolCalls.length === 0) return
296
- messages.push({
297
- role: "assistant",
298
- content: toolCalls
299
- })
300
- toolCalls = []
301
- }
302
- for (const part of message.parts) {
303
- if (part._tag === "ToolCall") {
304
- flushContent()
305
- toolCalls.push({
306
- id: part.id,
307
- type: "tool_use",
308
- name: part.name,
309
- input: part.params as Record<string, unknown>
310
- })
311
- } else if (part._tag === "ToolCallResolved") {
312
- flushContent()
313
- flushToolCalls()
314
- messages.push({
315
- role: "user",
316
- content: [
317
- {
318
- type: "tool_result",
319
- tool_use_id: part.toolCallId,
320
- content: JSON.stringify(part.value)
321
- }
322
- ]
323
- })
324
- } else {
325
- flushToolCalls()
326
- parts.push(makeContentPart(part))
327
- }
328
- }
329
- flushContent()
330
- flushToolCalls()
331
- return messages
332
- }
333
-
334
- const makeContentPart = (
335
- part: AiInput.TextPart | AiInput.ImagePart | AiInput.ImageUrlPart
336
- ): typeof Generated.InputContentBlock.Encoded => {
337
- switch (part._tag) {
338
- case "Image":
339
- return {
340
- type: "image",
341
- source: {
342
- type: "base64",
343
- media_type: part.image.contentType as any,
344
- data: part.asBase64
345
- }
346
- }
347
- case "ImageUrl":
348
- throw new AiError({
349
- module: "AnthropicCompletions",
350
- method: "create",
351
- description: "Anthropic does not currently support adding URLs in either the text or image blocks"
352
- })
353
- case "Text":
354
- return {
355
- type: "text",
356
- text: part.content
357
- }
358
- }
359
- }
360
-
361
- const makeResponse = Effect.fnUntraced(function*(
362
- response: Generated.Message,
363
- method: string,
364
- structuredTool?: {
365
- readonly name: string
366
- readonly description: string
367
- }
368
- ) {
369
- if (structuredTool !== undefined && response.stop_reason === "tool_use") {
370
- const toolUse = response.content.filter((chunk) => chunk.type === "tool_use")
371
- const text = response.content.filter((chunk) => chunk.type === "text")
372
- if (toolUse.length !== 1) {
373
- return yield* new AiError({
374
- module: "AnthropicCompletions",
375
- method,
376
- description: "Unable to extract structured output tool call information from response"
377
- })
378
- }
379
- const tool = toolUse[0]
380
- const textParts = text.map(({ text }) => AiResponse.TextPart.fromContent(text))
381
- const toolCallPart = AiResponse.ToolCallPart.fromUnknown({
382
- id: tool.id,
383
- name: tool.name,
384
- params: tool.input
385
- })
386
- return AiResponse.AiResponse.make({
387
- role: AiRole.model,
388
- parts: Chunk.unsafeFromArray([...textParts, toolCallPart])
389
- })
390
- }
391
- const parts = Arr.empty<AiResponse.Part>()
392
- for (const chunk of response.content) {
393
- switch (chunk.type) {
394
- case "text": {
395
- parts.push(AiResponse.TextPart.fromContent(chunk.text))
396
- break
397
- }
398
- case "tool_use": {
399
- parts.push(AiResponse.ToolCallPart.fromUnknown({
400
- id: chunk.id,
401
- name: chunk.name,
402
- params: chunk.input
403
- }))
404
- break
405
- }
406
- }
407
- }
408
- return AiResponse.AiResponse.make({
409
- role: AiRole.model,
410
- parts: Chunk.unsafeFromArray(parts)
411
- })
412
- })
413
-
414
- const safeName = (name: string) => name.replace(/[^a-zA-Z0-9_-]/g, "_").replace(/_+/, "_")
415
-
416
- const annotateRequest = (
417
- span: Span,
418
- request: typeof Generated.CreateMessageParams.Encoded
419
- ): void => {
420
- addGenAIAnnotations(span, {
421
- system: "anthropic",
422
- operation: { name: "chat" },
423
- request: {
424
- model: request.model,
425
- temperature: request.temperature,
426
- topK: request.top_k,
427
- topP: request.top_p,
428
- maxTokens: request.max_tokens,
429
- stopSequences: Arr.ensure(request.stop_sequences).filter(
430
- Predicate.isNotNullable
431
- )
432
- }
433
- })
434
- }
435
-
436
- const annotateChatResponse = (
437
- span: Span,
438
- response: typeof Generated.Message.Encoded
439
- ): void => {
440
- addGenAIAnnotations(span, {
441
- response: {
442
- id: response.id,
443
- model: response.model,
444
- finishReasons: response.stop_reason ? [response.stop_reason] : undefined
445
- },
446
- usage: {
447
- inputTokens: response.usage.input_tokens,
448
- outputTokens: response.usage.output_tokens
449
- }
450
- })
451
- }
452
-
453
- const annotateStreamResponse = (span: Span, response: StreamChunk) => {
454
- const usage = response.parts.find((part) => part._tag === "Usage")
455
- if (Predicate.isNotNullable(usage)) {
456
- addGenAIAnnotations(span, {
457
- response: {
458
- id: usage.id,
459
- model: usage.model,
460
- finishReasons: usage.finishReasons
461
- },
462
- usage: {
463
- inputTokens: usage.inputTokens,
464
- outputTokens: usage.outputTokens
465
- }
466
- })
467
- }
468
- }