@effect/ai-anthropic 0.4.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AnthropicLanguageModel/package.json +6 -0
- package/dist/cjs/AnthropicClient.js +128 -96
- package/dist/cjs/AnthropicClient.js.map +1 -1
- package/dist/cjs/AnthropicConfig.js +2 -2
- package/dist/cjs/AnthropicConfig.js.map +1 -1
- package/dist/cjs/AnthropicLanguageModel.js +496 -0
- package/dist/cjs/AnthropicLanguageModel.js.map +1 -0
- package/dist/cjs/AnthropicTokenizer.js +7 -10
- package/dist/cjs/AnthropicTokenizer.js.map +1 -1
- package/dist/cjs/index.js +3 -3
- package/dist/cjs/internal/utilities.js +24 -0
- package/dist/cjs/internal/utilities.js.map +1 -0
- package/dist/dts/AnthropicClient.d.ts +9 -64
- package/dist/dts/AnthropicClient.d.ts.map +1 -1
- package/dist/dts/AnthropicConfig.d.ts +5 -6
- package/dist/dts/AnthropicConfig.d.ts.map +1 -1
- package/dist/dts/{AnthropicCompletions.d.ts → AnthropicLanguageModel.d.ts} +30 -22
- package/dist/dts/AnthropicLanguageModel.d.ts.map +1 -0
- package/dist/dts/AnthropicTokenizer.d.ts +2 -2
- package/dist/dts/AnthropicTokenizer.d.ts.map +1 -1
- package/dist/dts/index.d.ts +2 -2
- package/dist/dts/index.d.ts.map +1 -1
- package/dist/dts/internal/utilities.d.ts +2 -0
- package/dist/dts/internal/utilities.d.ts.map +1 -0
- package/dist/esm/AnthropicClient.js +127 -94
- package/dist/esm/AnthropicClient.js.map +1 -1
- package/dist/esm/AnthropicConfig.js +2 -2
- package/dist/esm/AnthropicConfig.js.map +1 -1
- package/dist/esm/AnthropicLanguageModel.js +484 -0
- package/dist/esm/AnthropicLanguageModel.js.map +1 -0
- package/dist/esm/AnthropicTokenizer.js +7 -10
- package/dist/esm/AnthropicTokenizer.js.map +1 -1
- package/dist/esm/index.js +2 -2
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/internal/utilities.js +15 -0
- package/dist/esm/internal/utilities.js.map +1 -0
- package/package.json +13 -13
- package/src/AnthropicClient.ts +197 -162
- package/src/AnthropicConfig.ts +7 -8
- package/src/AnthropicLanguageModel.ts +591 -0
- package/src/AnthropicTokenizer.ts +31 -19
- package/src/index.ts +2 -2
- package/src/internal/utilities.ts +18 -0
- package/AnthropicCompletions/package.json +0 -6
- package/dist/cjs/AnthropicCompletions.js +0 -358
- package/dist/cjs/AnthropicCompletions.js.map +0 -1
- package/dist/dts/AnthropicCompletions.d.ts.map +0 -1
- package/dist/esm/AnthropicCompletions.js +0 -345
- package/dist/esm/AnthropicCompletions.js.map +0 -1
- package/src/AnthropicCompletions.ts +0 -468
package/src/AnthropicLanguageModel.ts
ADDED
@@ -0,0 +1,591 @@
+/**
+ * @since 1.0.0
+ */
+import { AiError } from "@effect/ai/AiError"
+import type * as AiInput from "@effect/ai/AiInput"
+import * as AiLanguageModel from "@effect/ai/AiLanguageModel"
+import * as AiModel from "@effect/ai/AiModel"
+import * as AiResponse from "@effect/ai/AiResponse"
+import { addGenAIAnnotations } from "@effect/ai/AiTelemetry"
+import * as Tokenizer from "@effect/ai/Tokenizer"
+import * as Arr from "effect/Array"
+import * as Context from "effect/Context"
+import * as Effect from "effect/Effect"
+import * as Encoding from "effect/Encoding"
+import { dual } from "effect/Function"
+import * as Option from "effect/Option"
+import * as Predicate from "effect/Predicate"
+import * as Stream from "effect/Stream"
+import type { Span } from "effect/Tracer"
+import type { Mutable, Simplify } from "effect/Types"
+import { AnthropicClient } from "./AnthropicClient.js"
+import * as AnthropicTokenizer from "./AnthropicTokenizer.js"
+import type * as Generated from "./Generated.js"
+import * as InternalUtilities from "./internal/utilities.js"
+
+const constDisableValidation = { disableValidation: true } as const
+
+/**
+ * @since 1.0.0
+ * @category Models
+ */
+export type Model = typeof Generated.Model.Encoded
+
+// =============================================================================
+// Configuration
+// =============================================================================
+
+/**
+ * @since 1.0.0
+ * @category Context
+ */
+export class Config extends Context.Tag("@effect/ai-anthropic/AnthropicLanguageModel/Config")<
+  Config,
+  Config.Service
+>() {
+  /**
+   * @since 1.0.0
+   */
+  static readonly getOrUndefined: Effect.Effect<typeof Config.Service | undefined> = Effect.map(
+    Effect.context<never>(),
+    (context) => context.unsafeMap.get(Config.key)
+  )
+}
+
+/**
+ * @since 1.0.0
+ */
+export declare namespace Config {
+  /**
+   * @since 1.0.0
+   * @category Configuration
+   */
+  export interface Service extends
+    Simplify<
+      Partial<
+        Omit<
+          typeof Generated.CreateMessageParams.Encoded,
+          "messages" | "tools" | "tool_choice" | "stream"
+        >
+      >
+    >
+  {}
+}
+
+// =============================================================================
+// Anthropic Provider Metadata
+// =============================================================================
+
+/**
+ * @since 1.0.0
+ * @category Context
+ */
+export class ProviderMetadata extends Context.Tag(InternalUtilities.ProviderMetadataKey)<
+  ProviderMetadata,
+  ProviderMetadata.Service
+>() {}
+
+/**
+ * @since 1.0.0
+ */
+export declare namespace ProviderMetadata {
+  /**
+   * @since 1.0.0
+   * @category Provider Metadata
+   */
+  export interface Service {
+    /**
+     * Which custom stop sequence was generated, if any.
+     *
+     * Will be a non-null string if one of your custom stop sequences was
+     * generated.
+     */
+    readonly stopSequence?: string
+  }
+}
+
+// =============================================================================
+// Anthropic Language Model
+// =============================================================================
+
+const cacheKey = "@effect/ai-anthropic/AnthropicLanguageModel"
+
+/**
+ * @since 1.0.0
+ * @category Models
+ */
+export const model = (
+  model: (string & {}) | Model,
+  config?: Omit<Config.Service, "model">
+): AiModel.AiModel<AiLanguageModel.AiLanguageModel | Tokenizer.Tokenizer, AnthropicClient> =>
+  AiModel.make({
+    cacheKey,
+    cachedContext: Effect.map(make, (model) => Context.make(AiLanguageModel.AiLanguageModel, model)),
+    updateRequestContext: Effect.fnUntraced(function*(context: Context.Context<AiLanguageModel.AiLanguageModel>) {
+      const perRequestConfig = yield* Config.getOrUndefined
+      return Context.mergeAll(
+        context,
+        Context.make(Config, { model, ...config, ...perRequestConfig }),
+        Context.make(Tokenizer.Tokenizer, AnthropicTokenizer.make)
+      )
+    })
+  })
+
+const make = Effect.gen(function*() {
+  const client = yield* AnthropicClient
+
+  const makeRequest = Effect.fnUntraced(
+    function*(method: string, { prompt, system, tools, ...options }: AiLanguageModel.AiLanguageModelOptions) {
+      const config = yield* Config
+      const model = config.model
+      if (Predicate.isUndefined(model)) {
+        return yield* Effect.die(
+          new AiError({
+            module: "OpenAiLanguageModel",
+            method,
+            description: "No `model` specified for request"
+          })
+        )
+      }
+      const useStructured = tools.length === 1 && tools[0].structured
+      let toolChoice: typeof Generated.ToolChoice.Encoded | undefined = undefined
+      if (useStructured) {
+        toolChoice = { type: "tool", name: tools[0].name }
+      } else if (Predicate.isNotUndefined(toolChoice) && tools.length > 0) {
+        if (options.toolChoice === "required") {
+          toolChoice = { type: "any" }
+        } else if (typeof options.toolChoice === "string") {
+          toolChoice = { type: options.toolChoice }
+        } else {
+          toolChoice = { type: "tool", name: options.toolChoice.tool }
+        }
+      }
+      const messages = yield* makeMessages(method, prompt)
+      return {
+        model,
+        // TODO: re-evaluate a better way to do this
+        max_tokens: 4096,
+        ...config,
+        system: Option.getOrUndefined(system),
+        messages,
+        tools: tools.length === 0 ? undefined : tools.map((tool) => ({
+          name: tool.name,
+          description: tool.description,
+          input_schema: tool.parameters as any
+        })),
+        tool_choice: toolChoice
+      } satisfies typeof Generated.CreateMessageParams.Encoded
+    }
+  )
+
+  return yield* AiLanguageModel.make({
+    generateText: Effect.fnUntraced(
+      function*(options) {
+        const request = yield* makeRequest("generateText", options)
+        annotateRequest(options.span, request)
+        const rawResponse = yield* client.client.messagesPost({ params: {}, payload: request })
+        annotateChatResponse(options.span, rawResponse)
+        const response = yield* makeResponse(rawResponse)
+        return response
+      },
+      Effect.catchAll((cause) =>
+        AiError.is(cause) ? cause : new AiError({
+          module: "AnthropicLanguageModel",
+          method: "generateText",
+          description: "An error occurred",
+          cause
+        })
+      )
+    ),
+    streamText(options) {
+      return makeRequest("streamText", options).pipe(
+        Effect.tap((request) => annotateRequest(options.span, request)),
+        Effect.map(client.stream),
+        Stream.unwrap,
+        Stream.map((response) => {
+          annotateStreamResponse(options.span, response)
+          return response
+        }),
+        Stream.catchAll((cause) =>
+          AiError.is(cause) ? Effect.fail(cause) : Effect.fail(
+            new AiError({
+              module: "AnthropicLanguageModel",
+              method: "streamText",
+              description: "An error occurred",
+              cause
+            })
+          )
+        )
+      )
+    }
+  })
+})
+
+/**
+ * @since 1.0.0
+ * @category Configuration
+ */
+export const withConfigOverride: {
+  /**
+   * @since 1.0.0
+   * @category Configuration
+   */
+  (config: Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>
+  /**
+   * @since 1.0.0
+   * @category Configuration
+   */
+  <A, E, R>(self: Effect.Effect<A, E, R>, config: Config.Service): Effect.Effect<A, E, R>
+} = dual<
+  /**
+   * @since 1.0.0
+   * @category Configuration
+   */
+  (config: Config.Service) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>,
+  /**
+   * @since 1.0.0
+   * @category Configuration
+   */
+  <A, E, R>(self: Effect.Effect<A, E, R>, config: Config.Service) => Effect.Effect<A, E, R>
+>(2, (self, overrides) =>
+  Effect.flatMap(
+    Config.getOrUndefined,
+    (config) => Effect.provideService(self, Config, { ...config, ...overrides })
+  ))
+
+// =============================================================================
+// Utilities
+// =============================================================================
+
+type MessageGroup = AssistantMessageGroup | UserMessageGroup
+
+interface AssistantMessageGroup {
+  readonly type: "assistant"
+  readonly messages: Array<AiInput.AssistantMessage>
+}
+
+interface UserMessageGroup {
+  readonly type: "user"
+  readonly messages: Array<AiInput.ToolMessage | AiInput.UserMessage>
+}
+
+const groupMessages = (prompt: AiInput.AiInput): Array<MessageGroup> => {
+  const messages: Array<MessageGroup> = []
+  let current: MessageGroup | undefined = undefined
+  for (const message of prompt.messages) {
+    switch (message._tag) {
+      case "AssistantMessage": {
+        if (current?.type !== "assistant") {
+          current = { type: "assistant", messages: [] }
+          messages.push(current)
+        }
+        current.messages.push(message)
+        break
+      }
+      case "ToolMessage": {
+        if (current?.type !== "user") {
+          current = { type: "user", messages: [] }
+          messages.push(current)
+        }
+        current.messages.push(message)
+        break
+      }
+      case "UserMessage": {
+        if (current?.type !== "user") {
+          current = { type: "user", messages: [] }
+          messages.push(current)
+        }
+        current.messages.push(message)
+        break
+      }
+    }
+  }
+  return messages
+}
+
+const makeMessages = Effect.fnUntraced(
+  function*(method: string, prompt: AiInput.AiInput) {
+    const messages: Array<typeof Generated.InputMessage.Encoded> = []
+    const groups = groupMessages(prompt)
+    for (let i = 0; i < groups.length; i++) {
+      const group = groups[i]
+      const isLastGroup = i === groups.length - 1
+      switch (group.type) {
+        case "assistant": {
+          const content: Array<typeof Generated.InputContentBlock.Encoded> = []
+          for (let j = 0; j < group.messages.length; j++) {
+            const message = group.messages[j]
+            const isLastMessage = j === group.messages.length - 1
+            for (let k = 0; k < message.parts.length; k++) {
+              const part = message.parts[k]
+              const isLastPart = k === message.parts.length - 1
+              switch (part._tag) {
+                case "ReasoningPart": {
+                  content.push({
+                    type: "thinking",
+                    thinking: part.reasoningText,
+                    signature: part.signature!
+                  })
+                  break
+                }
+                case "RedactedReasoningPart": {
+                  content.push({
+                    type: "redacted_thinking",
+                    data: part.redactedText
+                  })
+                  break
+                }
+                case "TextPart": {
+                  content.push({
+                    type: "text",
+                    text:
+                      // Anthropic does not allow trailing whitespace in assistant
+                      // content blocks
+                      isLastGroup && isLastMessage && isLastPart
+                        ? part.text.trim()
+                        : part.text
+                  })
+                  break
+                }
+                case "ToolCallPart": {
+                  content.push({
+                    type: "tool_use",
+                    id: part.id,
+                    name: part.name,
+                    input: part.params as any
+                  })
+                  break
+                }
+              }
+            }
+          }
+          messages.push({ role: "assistant", content })
+          break
+        }
+        case "user": {
+          const content: Array<typeof Generated.InputContentBlock.Encoded> = []
+          for (let j = 0; j < group.messages.length; j++) {
+            const message = group.messages[j]
+            switch (message._tag) {
+              case "ToolMessage": {
+                for (let k = 0; k < message.parts.length; k++) {
+                  const part = message.parts[k]
+                  // TODO: support advanced tool result content parts
+                  content.push({
+                    type: "tool_result",
+                    tool_use_id: part.id,
+                    content: JSON.stringify(part.result)
+                  })
+                }
+                break
+              }
+              case "UserMessage": {
+                for (let k = 0; k < message.parts.length; k++) {
+                  const part = message.parts[k]
+                  switch (part._tag) {
+                    case "FilePart": {
+                      if (Predicate.isUndefined(part.mediaType) || part.mediaType !== "application/pdf") {
+                        return yield* new AiError({
+                          module: "AnthropicLanguageModel",
+                          method,
+                          description: "AnthropicLanguageModel only supports PDF file inputs"
+                        })
+                      }
+                      content.push({
+                        type: "document",
+                        source: {
+                          type: "base64",
+                          media_type: "application/pdf",
+                          data: Encoding.encodeBase64(part.data)
+                        }
+                      })
+                      break
+                    }
+                    case "FileUrlPart": {
+                      content.push({
+                        type: "document",
+                        source: {
+                          type: "url",
+                          url: part.url.toString()
+                        }
+                      })
+                      break
+                    }
+                    case "TextPart": {
+                      content.push({
+                        type: "text",
+                        text: part.text
+                      })
+                      break
+                    }
+                    case "ImagePart": {
+                      content.push({
+                        type: "image",
+                        source: {
+                          type: "base64",
+                          media_type: part.mediaType ?? "image/jpeg" as any,
+                          data: Encoding.encodeBase64(part.data)
+                        }
+                      })
+                      break
+                    }
+                    case "ImageUrlPart": {
+                      content.push({
+                        type: "image",
+                        source: {
+                          type: "url",
+                          url: part.url.toString()
+                        }
+                      })
+                      break
+                    }
+                  }
+                }
+                break
+              }
+            }
+          }
+          messages.push({ role: "user", content })
+          break
+        }
+      }
+    }
+    if (Arr.isNonEmptyReadonlyArray(messages)) {
+      return messages
+    }
+    return yield* new AiError({
+      module: "AnthropicLanguageModel",
+      method,
+      description: "Prompt contained no messages"
+    })
+  }
+)
+
+const makeResponse = Effect.fnUntraced(
+  function*(response: Generated.Message) {
+    const parts: Array<AiResponse.Part> = []
+    parts.push(
+      new AiResponse.MetadataPart({
+        id: response.id,
+        model: response.model
+      }, constDisableValidation)
+    )
+    for (const part of response.content) {
+      switch (part.type) {
+        case "text": {
+          parts.push(
+            new AiResponse.TextPart({
+              text: part.text
+            }, constDisableValidation)
+          )
+          break
+        }
+        case "tool_use": {
+          parts.push(
+            AiResponse.ToolCallPart.fromUnknown({
+              id: part.id,
+              name: part.name,
+              params: part.input
+            })
+          )
+          break
+        }
+        case "thinking": {
+          parts.push(
+            new AiResponse.ReasoningPart({
+              reasoningText: part.thinking,
+              signature: part.signature
+            }, constDisableValidation)
+          )
+          break
+        }
+        case "redacted_thinking": {
+          parts.push(
+            new AiResponse.RedactedReasoningPart({
+              redactedText: part.data
+            }, constDisableValidation)
+          )
+          break
+        }
+      }
+    }
+    const metadata: Mutable<ProviderMetadata.Service> = {}
+    if (response.stop_sequence !== null) {
+      metadata.stopSequence = response.stop_sequence
+    }
+    parts.push(
+      new AiResponse.FinishPart({
+        // Anthropic always returns a non-null `stop_reason` for non-streaming responses
+        reason: InternalUtilities.resolveFinishReason(response.stop_reason!),
+        usage: new AiResponse.Usage({
+          inputTokens: response.usage.input_tokens,
+          outputTokens: response.usage.output_tokens,
+          totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+          reasoningTokens: 0,
+          cacheReadInputTokens: response.usage.cache_read_input_tokens ?? 0,
+          cacheWriteInputTokens: response.usage.cache_creation_input_tokens ?? 0
+        }),
+        providerMetadata: { [InternalUtilities.ProviderMetadataKey]: metadata }
+      }, constDisableValidation)
+    )
+    return new AiResponse.AiResponse({
+      parts
+    }, constDisableValidation)
+  }
+)
+
+const annotateRequest = (
+  span: Span,
+  request: typeof Generated.CreateMessageParams.Encoded
+): void => {
+  addGenAIAnnotations(span, {
+    system: "anthropic",
+    operation: { name: "chat" },
+    request: {
+      model: request.model,
+      temperature: request.temperature,
+      topK: request.top_k,
+      topP: request.top_p,
+      maxTokens: request.max_tokens,
+      stopSequences: Arr.ensure(request.stop_sequences).filter(
+        Predicate.isNotNullable
+      )
+    }
+  })
+}
+
+const annotateChatResponse = (
+  span: Span,
+  response: typeof Generated.Message.Encoded
+): void => {
+  addGenAIAnnotations(span, {
+    response: {
+      id: response.id,
+      model: response.model,
+      finishReasons: response.stop_reason ? [response.stop_reason] : undefined
+    },
+    usage: {
+      inputTokens: response.usage.input_tokens,
+      outputTokens: response.usage.output_tokens
+    }
+  })
+}
+
+const annotateStreamResponse = (
+  span: Span,
+  response: AiResponse.AiResponse
+) => {
+  const metadataPart = response.parts.find((part) => part._tag === "MetadataPart")
+  const finishPart = response.parts.find((part) => part._tag === "FinishPart")
+  addGenAIAnnotations(span, {
+    response: {
+      id: metadataPart?.id,
+      model: metadataPart?.model,
+      finishReasons: finishPart?.reason ? [finishPart.reason] : undefined
+    },
+    usage: {
+      inputTokens: finishPart?.usage.inputTokens,
+      outputTokens: finishPart?.usage.outputTokens
+    }
+  })
+}
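The headline change in 0.5.0 is the removal of `AnthropicCompletions` in favor of the `AnthropicLanguageModel` module above, built around `@effect/ai`'s `AiLanguageModel` and `AiModel`. A minimal sketch of how the new entry points might be combined — the model id, config values, and import specifier are illustrative assumptions, not taken from the package:

```ts
import { AnthropicLanguageModel } from "@effect/ai-anthropic"
import * as Effect from "effect/Effect"

// Build an AiModel that provides AiLanguageModel + Tokenizer and requires an
// AnthropicClient. The model id and max_tokens value are assumptions used for
// illustration only.
const Claude = AnthropicLanguageModel.model("claude-3-5-sonnet-latest", {
  max_tokens: 1024
})

// Override the Anthropic request configuration for a single effect via the
// per-request Config service (merged by `updateRequestContext` above).
const withDeterministicSampling = <A, E, R>(self: Effect.Effect<A, E, R>) =>
  AnthropicLanguageModel.withConfigOverride(self, { temperature: 0 })
```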
package/src/AnthropicTokenizer.ts
CHANGED
@@ -3,44 +3,56 @@
  */
 import { getTokenizer } from "@anthropic-ai/tokenizer"
 import { AiError } from "@effect/ai/AiError"
+import type * as AiInput from "@effect/ai/AiInput"
 import * as Tokenizer from "@effect/ai/Tokenizer"
 import * as Arr from "effect/Array"
-import * as Chunk from "effect/Chunk"
 import * as Effect from "effect/Effect"
 import * as Layer from "effect/Layer"
 import * as Option from "effect/Option"
 
 /**
  * @since 1.0.0
- * @category
+ * @category Constructors
  */
 export const make = Tokenizer.make({
-  tokenize(
+  tokenize(input) {
     return Effect.try({
       try: () => {
         const tokenizer = getTokenizer()
-        const text = Arr.flatMap(
-          Arr.filterMap(
-
-
+        const text = Arr.flatMap(input.messages, (message) =>
+          Arr.filterMap(
+            message.parts as Array<
+              | AiInput.AssistantMessagePart
+              | AiInput.ToolMessagePart
+              | AiInput.UserMessagePart
+            >,
+            (part) => {
+              if (
+                part._tag === "FilePart" ||
+                part._tag === "FileUrlPart" ||
+                part._tag === "ImagePart" ||
+                part._tag === "ImageUrlPart" ||
+                part._tag === "ReasoningPart" ||
+                part._tag === "RedactedReasoningPart"
+              ) return Option.none()
+              return Option.some(
+                part._tag === "TextPart"
+                  ? part.text
+                  : JSON.stringify(
+                    part._tag === "ToolCallPart"
+                      ? part.params :
+                      part.result
+                  )
+              )
             }
-
-            part._tag === "Text"
-              ? part.content
-              : JSON.stringify(
-                part._tag === "ToolCall"
-                  ? part.params :
-                  part.value
-              )
-            )
-          })).join("")
+          )).join("")
         const encoded = tokenizer.encode(text.normalize("NFKC"), "all")
         tokenizer.free()
         return Array.from(encoded)
       },
       catch: (cause) =>
         new AiError({
-          module: "
+          module: "AnthropicTokenizer",
           method: "tokenize",
           description: "Could not tokenize",
           cause
@@ -51,6 +63,6 @@ export const make = Tokenizer.make({
 
 /**
  * @since 1.0.0
- * @category
+ * @category Layers
 */
 export const layer: Layer.Layer<Tokenizer.Tokenizer> = Layer.succeed(Tokenizer.Tokenizer, make)
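The tokenizer now walks the new `AiInput` message/part model (`TextPart`, `ToolCallPart`, tool results) instead of the old `Text`/`ToolCall`/`value` parts. A rough usage sketch, assuming the `Tokenizer` service exposes the `tokenize` method that `Tokenizer.make` wires up above and accepts an `AiInput.AiInput`:

```ts
import type * as AiInput from "@effect/ai/AiInput"
import * as Tokenizer from "@effect/ai/Tokenizer"
import * as Effect from "effect/Effect"
import * as AnthropicTokenizer from "./AnthropicTokenizer.js"

// Count tokens for a prompt using the Anthropic tokenizer layer. The exact
// shape of the Tokenizer service (a `tokenize` method returning token ids) is
// assumed from the implementation above, not from package documentation.
const countTokens = (input: AiInput.AiInput) =>
  Effect.gen(function*() {
    const tokenizer = yield* Tokenizer.Tokenizer
    const tokens = yield* tokenizer.tokenize(input)
    return tokens.length
  }).pipe(Effect.provide(AnthropicTokenizer.layer))
```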
package/src/index.ts
CHANGED
@@ -6,12 +6,12 @@ export * as AnthropicClient from "./AnthropicClient.js"
 /**
  * @since 1.0.0
  */
-export * as
+export * as AnthropicConfig from "./AnthropicConfig.js"
 
 /**
  * @since 1.0.0
  */
-export * as
+export * as AnthropicLanguageModel from "./AnthropicLanguageModel.js"
 
 /**
  * @since 1.0.0
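With these re-exports, the renamed module is available from the package root where `AnthropicCompletions` previously sat. The import specifier below is an assumption based on the `index.ts` exports shown here:

```ts
// 0.4.0
// import { AnthropicClient, AnthropicCompletions } from "@effect/ai-anthropic"

// 0.5.0
import { AnthropicClient, AnthropicConfig, AnthropicLanguageModel } from "@effect/ai-anthropic"
```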
package/src/internal/utilities.ts
ADDED
@@ -0,0 +1,18 @@
+import type * as AiResponse from "@effect/ai/AiResponse"
+import * as Predicate from "effect/Predicate"
+
+/** @internal */
+export const ProviderMetadataKey = "@effect/ai-anthropic/AnthropicLanguageModel/ProviderMetadata"
+
+const finishReasonMap: Record<string, AiResponse.FinishReason> = {
+  end_turn: "stop",
+  max_tokens: "length",
+  stop_sequence: "stop",
+  tool_use: "tool-calls"
+}
+
+/** @internal */
+export const resolveFinishReason = (finishReason: string): AiResponse.FinishReason => {
+  const reason = finishReasonMap[finishReason]
+  return Predicate.isUndefined(reason) ? "unknown" : reason
+}