@effect/ai-anthropic 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/AnthropicClient/package.json +6 -0
  2. package/AnthropicCompletions/package.json +6 -0
  3. package/AnthropicConfig/package.json +6 -0
  4. package/AnthropicTokenizer/package.json +6 -0
  5. package/Generated/package.json +6 -0
  6. package/LICENSE +21 -0
  7. package/README.md +5 -0
  8. package/dist/cjs/AnthropicClient.js +213 -0
  9. package/dist/cjs/AnthropicClient.js.map +1 -0
  10. package/dist/cjs/AnthropicCompletions.js +290 -0
  11. package/dist/cjs/AnthropicCompletions.js.map +1 -0
  12. package/dist/cjs/AnthropicConfig.js +31 -0
  13. package/dist/cjs/AnthropicConfig.js.map +1 -0
  14. package/dist/cjs/AnthropicTokenizer.js +50 -0
  15. package/dist/cjs/AnthropicTokenizer.js.map +1 -0
  16. package/dist/cjs/Generated.js +1510 -0
  17. package/dist/cjs/Generated.js.map +1 -0
  18. package/dist/cjs/index.js +19 -0
  19. package/dist/cjs/index.js.map +1 -0
  20. package/dist/dts/AnthropicClient.d.ts +126 -0
  21. package/dist/dts/AnthropicClient.d.ts.map +1 -0
  22. package/dist/dts/AnthropicCompletions.d.ts +25 -0
  23. package/dist/dts/AnthropicCompletions.d.ts.map +1 -0
  24. package/dist/dts/AnthropicConfig.d.ts +39 -0
  25. package/dist/dts/AnthropicConfig.d.ts.map +1 -0
  26. package/dist/dts/AnthropicTokenizer.d.ts +8 -0
  27. package/dist/dts/AnthropicTokenizer.d.ts.map +1 -0
  28. package/dist/dts/Generated.d.ts +3937 -0
  29. package/dist/dts/Generated.d.ts.map +1 -0
  30. package/dist/dts/index.d.ts +21 -0
  31. package/dist/dts/index.d.ts.map +1 -0
  32. package/dist/esm/AnthropicClient.js +199 -0
  33. package/dist/esm/AnthropicClient.js.map +1 -0
  34. package/dist/esm/AnthropicCompletions.js +279 -0
  35. package/dist/esm/AnthropicCompletions.js.map +1 -0
  36. package/dist/esm/AnthropicConfig.js +22 -0
  37. package/dist/esm/AnthropicConfig.js.map +1 -0
  38. package/dist/esm/AnthropicTokenizer.js +41 -0
  39. package/dist/esm/AnthropicTokenizer.js.map +1 -0
  40. package/dist/esm/Generated.js +1273 -0
  41. package/dist/esm/Generated.js.map +1 -0
  42. package/dist/esm/index.js +21 -0
  43. package/dist/esm/index.js.map +1 -0
  44. package/dist/esm/package.json +4 -0
  45. package/package.json +79 -0
  46. package/src/AnthropicClient.ts +415 -0
  47. package/src/AnthropicCompletions.ts +352 -0
  48. package/src/AnthropicConfig.ts +76 -0
  49. package/src/AnthropicTokenizer.ts +52 -0
  50. package/src/Generated.ts +1811 -0
  51. package/src/index.ts +24 -0
@@ -0,0 +1,352 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import { AiError } from "@effect/ai/AiError"
5
+ import type * as AiInput from "@effect/ai/AiInput"
6
+ import * as AiResponse from "@effect/ai/AiResponse"
7
+ import * as AiRole from "@effect/ai/AiRole"
8
+ import { addGenAIAnnotations } from "@effect/ai/AiTelemetry"
9
+ import * as Completions from "@effect/ai/Completions"
10
+ import type * as Tokenizer from "@effect/ai/Tokenizer"
11
+ import * as Arr from "effect/Array"
12
+ import * as Chunk from "effect/Chunk"
13
+ import * as Effect from "effect/Effect"
14
+ import * as Layer from "effect/Layer"
15
+ import * as Option from "effect/Option"
16
+ import * as Predicate from "effect/Predicate"
17
+ import * as Stream from "effect/Stream"
18
+ import type { Span } from "effect/Tracer"
19
+ import type { StreamChunk } from "./AnthropicClient.js"
20
+ import { AnthropicClient } from "./AnthropicClient.js"
21
+ import { AnthropicConfig } from "./AnthropicConfig.js"
22
+ import * as AnthropicTokenizer from "./AnthropicTokenizer.js"
23
+ import type * as Generated from "./Generated.js"
24
+
25
+ /**
26
+ * @since 1.0.0
27
+ * @category models
28
+ */
29
+ export type Model = typeof Generated.ModelEnum.Encoded
30
+
31
+ const make = (options: { readonly model: (string & {}) | Model }) =>
32
+ Effect.gen(function*() {
33
+ const client = yield* AnthropicClient
34
+ const config = yield* AnthropicConfig.getOrUndefined
35
+
36
+ const makeRequest = ({
37
+ input,
38
+ required,
39
+ system,
40
+ tools
41
+ }: Completions.CompletionOptions) => {
42
+ const useStructured = tools.length === 1 && tools[0].structured
43
+ return Effect.map(
44
+ Effect.context<never>(),
45
+ (context): typeof Generated.CreateMessageParams.Encoded => ({
46
+ model: options.model,
47
+ // TODO: re-evaluate a better way to do this
48
+ max_tokens: 4096,
49
+ ...config,
50
+ ...context.unsafeMap.get(AnthropicConfig.key),
51
+ system: Option.getOrUndefined(system),
52
+ messages: makeMessages(input),
53
+ tools: tools.length === 0 ? undefined : tools.map((tool) => ({
54
+ name: tool.name,
55
+ description: tool.description,
56
+ input_schema: tool.parameters as any
57
+ })),
58
+ tool_choice: !useStructured && tools.length > 0
59
+ // For non-structured outputs, ensure tools are used if required
60
+ ? typeof required === "boolean"
61
+ ? required ? { type: "any" } : { type: "auto" }
62
+ : { type: "tool", name: required }
63
+ // For structured outputs, ensure the json output tool is used
64
+ : useStructured
65
+ ? { type: "tool", name: tools[0].name }
66
+ : undefined
67
+ })
68
+ )
69
+ }
70
+
71
+ return yield* Completions.make({
72
+ create({ span, ...options }) {
73
+ return makeRequest(options).pipe(
74
+ Effect.tap((request) => annotateRequest(span, request)),
75
+ Effect.flatMap((payload) => client.client.messagesPost({ params: {}, payload })),
76
+ Effect.tap((response) => annotateChatResponse(span, response)),
77
+ Effect.flatMap((response) =>
78
+ makeResponse(
79
+ response,
80
+ "create",
81
+ options.tools.length === 1 && options.tools[0].structured
82
+ ? options.tools[0]
83
+ : undefined
84
+ )
85
+ ),
86
+ Effect.catchAll((cause) =>
87
+ Effect.fail(
88
+ new AiError({
89
+ module: "AnthropicCompletions",
90
+ method: "create",
91
+ description: "An error occurred",
92
+ cause
93
+ })
94
+ )
95
+ )
96
+ )
97
+ },
98
+ stream({ span, ...options }) {
99
+ return makeRequest(options).pipe(
100
+ Effect.tap((request) => annotateRequest(span, request)),
101
+ Effect.map(client.stream),
102
+ Stream.unwrap,
103
+ Stream.tap((response) => {
104
+ annotateStreamResponse(span, response)
105
+ return Effect.void
106
+ }),
107
+ Stream.map((response) => response.asAiResponse),
108
+ Stream.catchAll((cause) =>
109
+ Effect.fail(
110
+ new AiError({
111
+ module: "AnthropicCompletions",
112
+ method: "stream",
113
+ description: "An error occurred",
114
+ cause
115
+ })
116
+ )
117
+ )
118
+ )
119
+ }
120
+ })
121
+ })
122
+
123
+ /**
124
+ * @since 1.0.0
125
+ * @category layers
126
+ */
127
+ export const layerCompletions = (options: {
128
+ readonly model: (string & {}) | Model
129
+ }): Layer.Layer<Completions.Completions, never, AnthropicClient> => Layer.effect(Completions.Completions, make(options))
130
+
131
+ /**
132
+ * @since 1.0.0
133
+ * @category layers
134
+ */
135
+ export const layer = (options: {
136
+ readonly model: (string & {}) | Model
137
+ }): Layer.Layer<
138
+ Completions.Completions | Tokenizer.Tokenizer,
139
+ never,
140
+ AnthropicClient
141
+ > => Layer.merge(layerCompletions(options), AnthropicTokenizer.layer)
142
+
143
+ const makeMessages = (
144
+ aiInput: AiInput.AiInput
145
+ ): Arr.NonEmptyReadonlyArray<typeof Generated.InputMessage.Encoded> => {
146
+ const messages: Array<typeof Generated.InputMessage.Encoded> = []
147
+ for (const input of aiInput) {
148
+ for (const message of convertMessage(input)) {
149
+ messages.push(message)
150
+ }
151
+ }
152
+ return messages as any
153
+ }
154
+
155
+ const formatRole = (role: AiRole.AiRole) => {
156
+ switch (role._tag) {
157
+ case "UserWithName":
158
+ return {
159
+ role: "user",
160
+ name: safeName(role.name)
161
+ } as const
162
+ case "User":
163
+ return {
164
+ role: "user"
165
+ } as const
166
+ case "Model":
167
+ return {
168
+ role: "assistant"
169
+ } as const
170
+ }
171
+ }
172
+
173
+ const convertMessage = (
174
+ message: AiInput.Message
175
+ ): Array<typeof Generated.InputMessage.Encoded> => {
176
+ const messages: Array<typeof Generated.InputMessage.Encoded> = []
177
+ let parts: Array<typeof Generated.InputContentBlock.Encoded> = []
178
+ let toolCalls: Array<typeof Generated.RequestToolUseBlock.Encoded> = []
179
+ function flushContent() {
180
+ if (parts.length === 0) return
181
+ messages.push({
182
+ ...formatRole(message.role),
183
+ content: parts
184
+ })
185
+ parts = []
186
+ }
187
+ function flushToolCalls() {
188
+ if (toolCalls.length === 0) return
189
+ messages.push({
190
+ role: "assistant",
191
+ content: toolCalls
192
+ })
193
+ toolCalls = []
194
+ }
195
+ for (const part of message.parts) {
196
+ if (part._tag === "ToolCall") {
197
+ flushContent()
198
+ toolCalls.push({
199
+ id: part.id,
200
+ type: "tool_use",
201
+ name: part.name,
202
+ input: part.params as Record<string, unknown>
203
+ })
204
+ } else if (part._tag === "ToolCallResolved") {
205
+ flushContent()
206
+ flushToolCalls()
207
+ messages.push({
208
+ role: "user",
209
+ content: [
210
+ {
211
+ type: "tool_result",
212
+ tool_use_id: part.toolCallId,
213
+ content: JSON.stringify(part.value)
214
+ }
215
+ ]
216
+ })
217
+ } else {
218
+ flushToolCalls()
219
+ parts.push(makeContentPart(part))
220
+ }
221
+ }
222
+ flushContent()
223
+ flushToolCalls()
224
+ return messages
225
+ }
226
+
227
+ const makeContentPart = (
228
+ part: AiInput.TextPart | AiInput.ImagePart | AiInput.ImageUrlPart
229
+ ): typeof Generated.InputContentBlock.Encoded => {
230
+ switch (part._tag) {
231
+ case "Image":
232
+ return {
233
+ type: "image",
234
+ source: {
235
+ type: "base64",
236
+ media_type: part.image.contentType as any,
237
+ data: part.asBase64
238
+ }
239
+ }
240
+ case "ImageUrl":
241
+ throw new AiError({
242
+ module: "AnthropicCompletions",
243
+ method: "create",
244
+ description: "Anthropic does not currently support adding URLs in either the text or image blocks"
245
+ })
246
+ case "Text":
247
+ return {
248
+ type: "text",
249
+ text: part.content
250
+ }
251
+ }
252
+ }
253
+
254
+ const makeResponse = Effect.fnUntraced(function*(
255
+ response: Generated.Message,
256
+ method: string,
257
+ structuredTool?: {
258
+ readonly name: string
259
+ readonly description: string
260
+ }
261
+ ) {
262
+ if (structuredTool !== undefined && response.stop_reason === "tool_use") {
263
+ const [text, toolUse] = Arr.partition(response.content, (chunk) => chunk.type === "tool_use")
264
+ if (toolUse.length !== 1) {
265
+ return yield* new AiError({
266
+ module: "AnthropicCompletions",
267
+ method,
268
+ description: "Unable to extract structured output tool call information from response"
269
+ })
270
+ }
271
+ const tool = toolUse[0]
272
+ const textParts = text.map(({ text }) => AiResponse.TextPart.fromContent(text))
273
+ const toolCallPart = AiResponse.ToolCallPart.fromUnknown({
274
+ id: tool.id,
275
+ name: tool.name,
276
+ params: tool.input
277
+ })
278
+ return AiResponse.AiResponse.make({
279
+ role: AiRole.model,
280
+ parts: Chunk.unsafeFromArray([...textParts, toolCallPart])
281
+ })
282
+ }
283
+ const parts = response.content.map((chunk) =>
284
+ chunk.type === "text"
285
+ ? AiResponse.TextPart.fromContent(chunk.text)
286
+ : AiResponse.ToolCallPart.fromUnknown({
287
+ id: chunk.id,
288
+ name: chunk.name,
289
+ params: chunk.input
290
+ })
291
+ )
292
+ return AiResponse.AiResponse.make({
293
+ role: AiRole.model,
294
+ parts: Chunk.unsafeFromArray(parts)
295
+ })
296
+ })
297
+
298
+ const safeName = (name: string) => name.replace(/[^a-zA-Z0-9_-]/g, "_").replace(/_+/, "_")
299
+
300
+ const annotateRequest = (
301
+ span: Span,
302
+ request: typeof Generated.CreateMessageParams.Encoded
303
+ ): void => {
304
+ addGenAIAnnotations(span, {
305
+ system: "anthropic",
306
+ operation: { name: "chat" },
307
+ request: {
308
+ model: request.model,
309
+ temperature: request.temperature,
310
+ topK: request.top_k,
311
+ topP: request.top_p,
312
+ maxTokens: request.max_tokens,
313
+ stopSequences: Arr.ensure(request.stop_sequences).filter(
314
+ Predicate.isNotNullable
315
+ )
316
+ }
317
+ })
318
+ }
319
+
320
+ const annotateChatResponse = (
321
+ span: Span,
322
+ response: typeof Generated.Message.Encoded
323
+ ): void => {
324
+ addGenAIAnnotations(span, {
325
+ response: {
326
+ id: response.id,
327
+ model: response.model,
328
+ finishReasons: response.stop_reason ? [response.stop_reason] : undefined
329
+ },
330
+ usage: {
331
+ inputTokens: response.usage.input_tokens,
332
+ outputTokens: response.usage.output_tokens
333
+ }
334
+ })
335
+ }
336
+
337
+ const annotateStreamResponse = (span: Span, response: StreamChunk) => {
338
+ const usage = response.parts.find((part) => part._tag === "Usage")
339
+ if (Predicate.isNotNullable(usage)) {
340
+ addGenAIAnnotations(span, {
341
+ response: {
342
+ id: usage.id,
343
+ model: usage.model,
344
+ finishReasons: usage.finishReasons
345
+ },
346
+ usage: {
347
+ inputTokens: usage.inputTokens,
348
+ outputTokens: usage.outputTokens
349
+ }
350
+ })
351
+ }
352
+ }
@@ -0,0 +1,76 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import type { HttpClient } from "@effect/platform/HttpClient"
5
+ import * as Context from "effect/Context"
6
+ import * as Effect from "effect/Effect"
7
+ import { dual } from "effect/Function"
8
+ import type { Simplify } from "effect/Types"
9
+ import type * as Generated from "./Generated.js"
10
+
11
+ /**
12
+ * @since 1.0.0
13
+ * @category tags
14
+ */
15
+ export class AnthropicConfig extends Context.Tag("@effect/ai-openai/AnthropicConfig")<
16
+ AnthropicConfig,
17
+ AnthropicConfig.Service
18
+ >() {
19
+ /**
20
+ * @since 1.0.0
21
+ */
22
+ static readonly getOrUndefined: Effect.Effect<typeof AnthropicConfig.Service | undefined> = Effect.map(
23
+ Effect.context<never>(),
24
+ (context) => context.unsafeMap.get(AnthropicConfig.key)
25
+ )
26
+ }
27
+
28
+ /**
29
+ * @since 1.0.0
30
+ * @category models
31
+ */
32
+ export declare namespace AnthropicConfig {
33
+ /**
34
+ * @since 1.0.0
35
+ * @category models
36
+ */
37
+ export interface Service extends
38
+ Simplify<
39
+ Partial<
40
+ Omit<
41
+ typeof Generated.CreateMessageParams.Encoded,
42
+ "messages" | "tools" | "tool_choice" | "stream"
43
+ >
44
+ >
45
+ >
46
+ {
47
+ readonly transformClient?: (client: HttpClient) => HttpClient
48
+ }
49
+ }
50
+
51
+ /**
52
+ * @since 1.0.0
53
+ * @category configuration
54
+ */
55
+ export const withClientTransform = dual<
56
+ /**
57
+ * @since 1.0.0
58
+ * @category configuration
59
+ */
60
+ (transform: (client: HttpClient) => HttpClient) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>,
61
+ /**
62
+ * @since 1.0.0
63
+ * @category configuration
64
+ */
65
+ <A, E, R>(
66
+ self: Effect.Effect<A, E, R>,
67
+ transform: (client: HttpClient) => HttpClient
68
+ ) => Effect.Effect<A, E, R>
69
+ >(
70
+ 2,
71
+ (self, transformClient) =>
72
+ Effect.flatMap(
73
+ AnthropicConfig.getOrUndefined,
74
+ (config) => Effect.provideService(self, AnthropicConfig, { ...config, transformClient })
75
+ )
76
+ )
@@ -0,0 +1,52 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ import { getTokenizer } from "@anthropic-ai/tokenizer"
5
+ import { AiError } from "@effect/ai/AiError"
6
+ import * as Tokenizer from "@effect/ai/Tokenizer"
7
+ import * as Arr from "effect/Array"
8
+ import * as Chunk from "effect/Chunk"
9
+ import * as Effect from "effect/Effect"
10
+ import * as Layer from "effect/Layer"
11
+ import * as Option from "effect/Option"
12
+
13
+ const make = Tokenizer.make({
14
+ tokenize(content) {
15
+ return Effect.try({
16
+ try: () => {
17
+ const tokenizer = getTokenizer()
18
+ const text = Arr.flatMap(Chunk.toReadonlyArray(content), (message) =>
19
+ Arr.filterMap(message.parts, (part) => {
20
+ if (part._tag === "Image" || part._tag === "ImageUrl") {
21
+ return Option.none()
22
+ }
23
+ return Option.some(
24
+ part._tag === "Text"
25
+ ? part.content
26
+ : JSON.stringify(
27
+ part._tag === "ToolCall"
28
+ ? part.params :
29
+ part.value
30
+ )
31
+ )
32
+ })).join("")
33
+ const encoded = tokenizer.encode(text.normalize("NFKC"), "all")
34
+ tokenizer.free()
35
+ return Array.from(encoded)
36
+ },
37
+ catch: (cause) =>
38
+ new AiError({
39
+ module: "OpenAiCompletions",
40
+ method: "tokenize",
41
+ description: "Could not tokenize",
42
+ cause
43
+ })
44
+ })
45
+ }
46
+ })
47
+
48
+ /**
49
+ * @since 1.0.0
50
+ * @category layers
51
+ */
52
+ export const layer: Layer.Layer<Tokenizer.Tokenizer> = Layer.succeed(Tokenizer.Tokenizer, make)