@effect/ai-openai 4.0.0-beta.6 → 4.0.0-beta.60
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generated.d.ts +66011 -38686
- package/dist/Generated.d.ts.map +1 -1
- package/dist/Generated.js +1 -1
- package/dist/Generated.js.map +1 -1
- package/dist/OpenAiClient.d.ts +63 -17
- package/dist/OpenAiClient.d.ts.map +1 -1
- package/dist/OpenAiClient.js +210 -33
- package/dist/OpenAiClient.js.map +1 -1
- package/dist/OpenAiClientGenerated.d.ts +91 -0
- package/dist/OpenAiClientGenerated.d.ts.map +1 -0
- package/dist/OpenAiClientGenerated.js +84 -0
- package/dist/OpenAiClientGenerated.js.map +1 -0
- package/dist/OpenAiConfig.d.ts +2 -2
- package/dist/OpenAiConfig.d.ts.map +1 -1
- package/dist/OpenAiConfig.js +3 -3
- package/dist/OpenAiConfig.js.map +1 -1
- package/dist/OpenAiEmbeddingModel.d.ts +85 -0
- package/dist/OpenAiEmbeddingModel.d.ts.map +1 -0
- package/dist/OpenAiEmbeddingModel.js +119 -0
- package/dist/OpenAiEmbeddingModel.js.map +1 -0
- package/dist/OpenAiError.d.ts +22 -32
- package/dist/OpenAiError.d.ts.map +1 -1
- package/dist/OpenAiLanguageModel.d.ts +43 -49
- package/dist/OpenAiLanguageModel.d.ts.map +1 -1
- package/dist/OpenAiLanguageModel.js +296 -152
- package/dist/OpenAiLanguageModel.js.map +1 -1
- package/dist/OpenAiSchema.d.ts +1920 -0
- package/dist/OpenAiSchema.d.ts.map +1 -0
- package/dist/OpenAiSchema.js +536 -0
- package/dist/OpenAiSchema.js.map +1 -0
- package/dist/OpenAiTool.d.ts +8 -7
- package/dist/OpenAiTool.d.ts.map +1 -1
- package/dist/OpenAiTool.js +2 -1
- package/dist/OpenAiTool.js.map +1 -1
- package/dist/index.d.ts +18 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +18 -0
- package/dist/index.js.map +1 -1
- package/dist/internal/errors.js +4 -4
- package/dist/internal/errors.js.map +1 -1
- package/package.json +3 -3
- package/src/Generated.ts +7416 -4257
- package/src/OpenAiClient.ts +377 -81
- package/src/OpenAiClientGenerated.ts +202 -0
- package/src/OpenAiConfig.ts +3 -3
- package/src/OpenAiEmbeddingModel.ts +203 -0
- package/src/OpenAiError.ts +24 -32
- package/src/OpenAiLanguageModel.ts +420 -144
- package/src/OpenAiSchema.ts +875 -0
- package/src/OpenAiTool.ts +2 -1
- package/src/index.ts +21 -0
- package/src/internal/errors.ts +6 -4
package/src/OpenAiLanguageModel.ts

@@ -6,23 +6,25 @@
  *
  * @since 1.0.0
  */
+import * as Context from "effect/Context"
 import * as DateTime from "effect/DateTime"
 import * as Effect from "effect/Effect"
 import * as Encoding from "effect/Encoding"
 import { dual } from "effect/Function"
 import * as Layer from "effect/Layer"
+import * as Option from "effect/Option"
 import * as Predicate from "effect/Predicate"
 import * as Redactable from "effect/Redactable"
 import * as Schema from "effect/Schema"
 import * as AST from "effect/SchemaAST"
-import * as ServiceMap from "effect/ServiceMap"
 import * as Stream from "effect/Stream"
 import type { Span } from "effect/Tracer"
-import type { DeepMutable, Simplify } from "effect/Types"
+import type { DeepMutable, Mutable, Simplify } from "effect/Types"
 import * as AiError from "effect/unstable/ai/AiError"
 import * as IdGenerator from "effect/unstable/ai/IdGenerator"
 import * as LanguageModel from "effect/unstable/ai/LanguageModel"
 import * as AiModel from "effect/unstable/ai/Model"
+import { toCodecOpenAI } from "effect/unstable/ai/OpenAiStructuredOutput"
 import type * as Prompt from "effect/unstable/ai/Prompt"
 import type * as Response from "effect/unstable/ai/Response"
 import * as Tool from "effect/unstable/ai/Tool"
@@ -31,6 +33,7 @@ import type * as HttpClientResponse from "effect/unstable/http/HttpClientRespons
 import * as Generated from "./Generated.ts"
 import * as InternalUtilities from "./internal/utilities.ts"
 import { OpenAiClient } from "./OpenAiClient.ts"
+import type * as OpenAiSchema from "./OpenAiSchema.ts"
 import { addGenAIAnnotations } from "./OpenAiTelemetry.ts"
 import type * as OpenAiTool from "./OpenAiTool.ts"
 
@@ -58,12 +61,12 @@ type ImageDetail = "auto" | "low" | "high"
  * @since 1.0.0
  * @category services
  */
-export class Config extends
+export class Config extends Context.Service<
   Config,
   Simplify<
     & Partial<
       Omit<
-        typeof
+        typeof OpenAiSchema.CreateResponse.Encoded,
         "input" | "tools" | "tool_choice" | "stream" | "text"
       >
     >
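The hunk above re-keys the per-request Config service on the generated OpenAiSchema.CreateResponse schema, so an override is just a partial "create response" payload. A minimal sketch of supplying such overrides, assuming Config can be provided like any other Effect service tag and that the shown fields (temperature, text.verbosity) exist on the encoded request type; the import path and program are placeholders:

import * as Effect from "effect/Effect"
import * as OpenAiLanguageModel from "@effect/ai-openai/OpenAiLanguageModel"

// Hypothetical effect that calls the language model somewhere inside.
declare const program: Effect.Effect<string>

// Every request built by OpenAiLanguageModel.make merges these fields in,
// because makeConfig reads the Config service out of the ambient context.
const withOverrides = program.pipe(
  Effect.provideService(OpenAiLanguageModel.Config, {
    temperature: 0,
    text: { verbosity: "low" }
  })
)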
@@ -138,7 +141,7 @@ declare module "effect/unstable/ai/Prompt" {
     /**
      * The status of item.
      */
-    readonly status?: typeof
+    readonly status?: typeof OpenAiSchema.MessageStatus.Encoded | null
     /**
      * The ID of the approval request.
      */
@@ -155,7 +158,7 @@ declare module "effect/unstable/ai/Prompt" {
     /**
      * The status of item.
      */
-    readonly status?: typeof
+    readonly status?: typeof OpenAiSchema.MessageStatus.Encoded | null
    /**
     * The ID of the approval request.
     */
@@ -172,11 +175,11 @@ declare module "effect/unstable/ai/Prompt" {
     /**
      * The status of item.
      */
-    readonly status?: typeof
+    readonly status?: typeof OpenAiSchema.MessageStatus.Encoded | null
     /**
      * A list of annotations that apply to the output text.
      */
-    readonly annotations?: ReadonlyArray<typeof
+    readonly annotations?: ReadonlyArray<typeof OpenAiSchema.Annotation.Encoded> | null
   } | null
  }
 }
@@ -194,11 +197,11 @@ declare module "effect/unstable/ai/Response" {
     /**
      * The status of item.
      */
-    readonly status?: typeof
+    readonly status?: typeof OpenAiSchema.MessageStatus.Encoded | null
     /**
      * The text content part annotations.
      */
-    readonly annotations?: ReadonlyArray<typeof
+    readonly annotations?: ReadonlyArray<typeof OpenAiSchema.Annotation.Encoded> | null
   }
 }
 
@@ -211,7 +214,7 @@ declare module "effect/unstable/ai/Response" {
 export interface TextEndPartMetadata extends ProviderMetadata {
   readonly openai?: {
     readonly itemId?: string | null
-    readonly annotations?: ReadonlyArray<typeof
+    readonly annotations?: ReadonlyArray<typeof OpenAiSchema.Annotation.Encoded> | null
   } | null
 }
 
@@ -319,7 +322,7 @@ export const model = (
   model: (string & {}) | Model,
   config?: Omit<typeof Config.Service, "model">
 ): AiModel.Model<"openai", LanguageModel.LanguageModel, OpenAiClient> =>
-  AiModel.make("openai", layer({ model, config }))
+  AiModel.make("openai", model, layer({ model, config }))
 
 // TODO
 // /**
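For reference, the change above means the exported model constructor now passes the model identifier to AiModel.make as its own argument (in addition to baking it into the layer). A minimal usage sketch; the model name is a placeholder and the subpath import is assumed:

import * as OpenAiLanguageModel from "@effect/ai-openai/OpenAiLanguageModel"

// Yields an AiModel.Model<"openai", LanguageModel.LanguageModel, OpenAiClient>:
// a handle that can later be provided to any program needing a LanguageModel.
const Gpt4o = OpenAiLanguageModel.model("gpt-4o", { temperature: 0 })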
@@ -330,7 +333,7 @@ export const model = (
 // model: (string & {}) | Model,
 // config?: Omit<typeof Config.Service, "model">
 // ): AiModel.Model<"openai", LanguageModel.LanguageModel | Tokenizer.Tokenizer, OpenAiClient> =>
-//   AiModel.make("openai", layerWithTokenizer({ model, config }))
+//   AiModel.make("openai", model, layerWithTokenizer({ model, config }))
 
 /**
  * Creates an OpenAI language model service.
@@ -345,7 +348,7 @@ export const make = Effect.fnUntraced(function*({ model, config: providerConfig
   const client = yield* OpenAiClient
 
   const makeConfig = Effect.gen(function*() {
-    const services = yield* Effect.
+    const services = yield* Effect.context<never>()
     return { model, ...providerConfig, ...services.mapUnsafe.get(Config.key) }
   })
 
@@ -354,9 +357,9 @@ export const make = Effect.fnUntraced(function*({ model, config: providerConfig
     readonly config: typeof Config.Service
     readonly options: LanguageModel.ProviderOptions
     readonly toolNameMapper: Tool.NameMapper<Tools>
-  }): Effect.fn.Return<typeof
-    const include = new Set<typeof
-    const capabilities = getModelCapabilities(config.model
+  }): Effect.fn.Return<typeof OpenAiSchema.CreateResponse.Encoded, AiError.AiError> {
+    const include = new Set<typeof OpenAiSchema.IncludeEnum.Encoded>()
+    const capabilities = getModelCapabilities(config.model as string)
     const messages = yield* prepareMessages({
       config,
       options,
@@ -369,26 +372,28 @@ export const make = Effect.fnUntraced(function*({ model, config: providerConfig
       options,
       toolNameMapper
     })
-    const responseFormat = prepareResponseFormat({
+    const responseFormat = yield* prepareResponseFormat({
      config,
      options
    })
-    const request: typeof
+    const request: Mutable<typeof OpenAiSchema.CreateResponse.Encoded> = {
      ...config,
      input: messages,
-      include: include.size > 0 ? Array.from(include) :
+      include: include.size > 0 ? Array.from(include) : undefined,
      text: {
-        verbosity: config.text?.verbosity ??
+        verbosity: config.text?.verbosity ?? undefined,
        format: responseFormat
-      }
-      ...(Predicate.isNotUndefined(tools) ? { tools } : undefined),
-      ...(Predicate.isNotUndefined(toolChoice) ? { tool_choice: toolChoice } : undefined)
+      }
    }
+    if (tools) request.tools = tools
+    if (toolChoice) request.tool_choice = toolChoice
+    if (options.previousResponseId) request.previous_response_id = options.previousResponseId
    return request
  }
 )
 
 return yield* LanguageModel.make({
+    codecTransformer: toCodecOpenAI,
   generateText: Effect.fnUntraced(
     function*(options) {
       const config = yield* makeConfig
@@ -512,10 +517,10 @@ const prepareMessages = Effect.fnUntraced(
   }: {
     readonly config: typeof Config.Service
     readonly options: LanguageModel.ProviderOptions
-    readonly include: Set<typeof
+    readonly include: Set<typeof OpenAiSchema.IncludeEnum.Encoded>
     readonly capabilities: ModelCapabilities
     readonly toolNameMapper: Tool.NameMapper<Tools>
-  }): Effect.fn.Return<ReadonlyArray<typeof
+  }): Effect.fn.Return<ReadonlyArray<typeof OpenAiSchema.InputItem.Encoded>, AiError.AiError> {
     const processedApprovalIds = new Set<string>()
 
     const hasConversation = Predicate.isNotNullish(config.conversation)
@@ -547,27 +552,28 @@ const prepareMessages = Effect.fnUntraced(
     if (config.store === false && capabilities.isReasoningModel) {
       include.add("reasoning.encrypted_content")
     }
-    if (
+    if (codeInterpreterTool) {
       include.add("code_interpreter_call.outputs")
     }
-    if (
+    if (webSearchTool || webSearchPreviewTool) {
       include.add("web_search_call.action.sources")
     }
 
-    const messages: Array<typeof
+    const messages: Array<typeof OpenAiSchema.InputItem.Encoded> = []
+    const prompt = options.incrementalPrompt ?? options.prompt
 
-    for (const message of
+    for (const message of prompt.content) {
      switch (message.role) {
        case "system": {
          messages.push({
-            role: getSystemMessageMode(config.model
+            role: getSystemMessageMode(config.model as string),
            content: message.content
          })
          break
        }
 
        case "user": {
-          const content: Array<typeof
+          const content: Array<typeof OpenAiSchema.InputContent.Encoded> = []
 
          for (let index = 0; index < message.content.length; index++) {
            const part = message.content[index]
@@ -630,7 +636,7 @@ const prepareMessages = Effect.fnUntraced(
        }
 
        case "assistant": {
-          const reasoningMessages: Record<string, DeepMutable<typeof
+          const reasoningMessages: Record<string, DeepMutable<typeof OpenAiSchema.ReasoningItem.Encoded>> = {}
 
          for (const part of message.content) {
            switch (part.type) {
@@ -689,7 +695,7 @@ const prepareMessages = Effect.fnUntraced(
                }
              }
            } else {
-              const summaryParts: Array<typeof
+              const summaryParts: Array<typeof OpenAiSchema.SummaryTextContent.Encoded> = []
 
              if (part.text.length > 0) {
                summaryParts.push({ type: "summary_text", text: part.text })
@@ -700,7 +706,9 @@ const prepareMessages = Effect.fnUntraced(
                type: "reasoning",
                id,
                summary: summaryParts,
-
+                ...(Predicate.isNotNull(encryptedContent)
+                  ? { encrypted_content: encryptedContent }
+                  : undefined)
              }
 
              messages.push(reasoningMessages[id])
@@ -921,7 +929,7 @@ const buildHttpRequestDetails = (
   method: request.method,
   url: request.url,
   urlParams: Array.from(request.urlParams),
-  hash: request.hash,
+  hash: Option.getOrUndefined(request.hash),
   headers: Redactable.redact(request.headers) as Record<string, string>
 })
 
@@ -936,7 +944,56 @@ const buildHttpResponseDetails = (
 // Response Conversion
 // =============================================================================
 
-type ResponseStreamEvent = typeof
+type ResponseStreamEvent = typeof OpenAiSchema.ResponseStreamEvent.Type
+
+type KnownResponseStreamEventType =
+  | "response.created"
+  | "response.completed"
+  | "response.incomplete"
+  | "response.failed"
+  | "response.output_item.added"
+  | "response.output_item.done"
+  | "response.output_text.delta"
+  | "response.output_text.annotation.added"
+  | "response.reasoning_summary_part.added"
+  | "response.reasoning_summary_part.done"
+  | "response.reasoning_summary_text.delta"
+  | "response.function_call_arguments.delta"
+  | "response.function_call_arguments.done"
+  | "response.code_interpreter_call_code.delta"
+  | "response.code_interpreter_call_code.done"
+  | "response.apply_patch_call_operation_diff.delta"
+  | "response.apply_patch_call_operation_diff.done"
+  | "response.image_generation_call.partial_image"
+  | "error"
+
+type KnownResponseStreamEvent = Extract<ResponseStreamEvent, { readonly type: KnownResponseStreamEventType }>
+
+const knownResponseStreamEventTypes = new Set<KnownResponseStreamEventType>([
+  "response.created",
+  "response.completed",
+  "response.incomplete",
+  "response.failed",
+  "response.output_item.added",
+  "response.output_item.done",
+  "response.output_text.delta",
+  "response.output_text.annotation.added",
+  "response.reasoning_summary_part.added",
+  "response.reasoning_summary_part.done",
+  "response.reasoning_summary_text.delta",
+  "response.function_call_arguments.delta",
+  "response.function_call_arguments.done",
+  "response.code_interpreter_call_code.delta",
+  "response.code_interpreter_call_code.done",
+  "response.apply_patch_call_operation_diff.delta",
+  "response.apply_patch_call_operation_diff.done",
+  "response.image_generation_call.partial_image",
+  "error"
+])
+
+const isKnownResponseStreamEvent = (
+  event: ResponseStreamEvent
+): event is KnownResponseStreamEvent => knownResponseStreamEventTypes.has(event.type as KnownResponseStreamEventType)
 
 const makeResponse = Effect.fnUntraced(
   function*<Tools extends ReadonlyArray<Tool.Any>>({
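The isKnownResponseStreamEvent guard added above is what lets the streaming handler skip event types it does not model instead of failing on them. A small illustration of the narrowing it performs (the events value is hypothetical):

declare const events: ReadonlyArray<ResponseStreamEvent>

// Anything outside knownResponseStreamEventTypes (for example a newly added
// "response.some_future_event") is dropped here and never reaches the switch
// statement inside makeStreamResponse.
const known: ReadonlyArray<KnownResponseStreamEvent> = events.filter(isKnownResponseStreamEvent)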
@@ -946,7 +1003,7 @@ const makeResponse = Effect.fnUntraced(
     toolNameMapper
   }: {
     readonly options: LanguageModel.ProviderOptions
-    readonly rawResponse:
+    readonly rawResponse: OpenAiSchema.Response
     readonly response: HttpClientResponse.HttpClientResponse
     readonly toolNameMapper: Tool.NameMapper<Tools>
   }): Effect.fn.Return<
@@ -985,7 +1042,7 @@ const makeResponse = Effect.fnUntraced(
          id: part.call_id,
          name: toolName,
          params: { call_id: part.call_id, operation: part.operation },
-          metadata: { openai:
+          metadata: { openai: makeItemIdMetadata(part.id) }
        })
        break
      }
@@ -1036,10 +1093,11 @@ const makeResponse = Effect.fnUntraced(
 
      case "function_call": {
        hasToolCalls = true
+
        const toolName = part.name
-
-        const
-          try: () => Tool.unsafeSecureJsonParse(
+
+        const toolParams = yield* Effect.try({
+          try: () => Tool.unsafeSecureJsonParse(part.arguments),
          catch: (cause) =>
            AiError.make({
              module: "OpenAiLanguageModel",
@@ -1051,12 +1109,15 @@ const makeResponse = Effect.fnUntraced(
            })
          })
        })
+
+        const params = yield* transformToolCallParams(options.tools, part.name, toolParams)
+
        parts.push({
          type: "tool-call",
          id: part.call_id,
          name: toolName,
          params,
-          metadata: { openai:
+          metadata: { openai: makeItemIdMetadata(part.id) }
        })
        break
      }
@@ -1087,7 +1148,7 @@ const makeResponse = Effect.fnUntraced(
          id: part.call_id,
          name: toolName,
          params: { action: part.action },
-          metadata: { openai:
+          metadata: { openai: makeItemIdMetadata(part.id) }
        })
        break
      }
@@ -1097,13 +1158,17 @@ const makeResponse = Effect.fnUntraced(
          ? (approvalRequests.get(part.approval_request_id) ?? part.id)
          : part.id
 
-        const toolName =
+        const { toolName, params } = yield* normalizeMcpToolCall({
+          toolNameMapper,
+          toolParams: part.arguments,
+          method: "makeResponse"
+        })
 
        parts.push({
          type: "tool-call",
          id: toolId,
          name: toolName,
-          params
+          params,
          providerExecuted: true
        })
 
@@ -1114,14 +1179,14 @@ const makeResponse = Effect.fnUntraced(
          isFailure: false,
          providerExecuted: true,
          result: {
-            type: "
+            type: "mcp_call",
            name: part.name,
            arguments: part.arguments,
            server_label: part.server_label,
            ...(Predicate.isNotNullish(part.output) ? { output: part.output } : undefined),
            ...(Predicate.isNotNullish(part.error) ? { error: part.error } : undefined)
          },
-          metadata: { openai:
+          metadata: { openai: makeItemIdMetadata(part.id) }
        })
 
        break
@@ -1135,20 +1200,11 @@ const makeResponse = Effect.fnUntraced(
      case "mcp_approval_request": {
        const approvalRequestId = (part as any).approval_request_id ?? part.id
        const toolId = yield* idGenerator.generateId()
-        const toolName = `mcp.${part.name}`
 
-        const params = yield*
-
-
-
-              module: "OpenAiLanguageModel",
-              method: "makeResponse",
-              reason: new AiError.ToolParameterValidationError({
-                toolName,
-                toolParams: {},
-                description: `Failed securely JSON parse tool parameters: ${cause}`
-              })
-            })
+        const { toolName, params } = yield* normalizeMcpToolCall({
+          toolNameMapper,
+          toolParams: part.arguments,
+          method: "makeResponse"
        })
 
        parts.push({
@@ -1296,7 +1352,7 @@ const makeResponse = Effect.fnUntraced(
          id: part.call_id,
          name: toolName,
          params: { action: part.action },
-          metadata: { openai:
+          metadata: { openai: makeItemIdMetadata(part.id) }
        })
        break
      }
@@ -1335,7 +1391,7 @@ const makeResponse = Effect.fnUntraced(
      reason: finishReason,
      usage: getUsage(rawResponse.usage),
      response: buildHttpResponseDetails(response),
-      ...(rawResponse.service_tier
+      ...toServiceTier(rawResponse.service_tier)
    })
 
    return parts
@@ -1368,18 +1424,44 @@ const makeStreamResponse = Effect.fnUntraced(
    let hasToolCalls = false
 
    // Track annotations for current message to include in text-end metadata
-    const activeAnnotations: Array<typeof
+    const activeAnnotations: Array<typeof OpenAiSchema.Annotation.Encoded> = []
+
+    type ReasoningSummaryPartStatus = "active" | "can-conclude" | "concluded"
+    type ReasoningPart = {
+      encryptedContent: string | undefined
+      summaryParts: Record<number, ReasoningSummaryPartStatus>
+    }
 
    // Track active reasoning items with state machine for proper concluding logic
-    const activeReasoning: Record<string, {
-
-
-
+    const activeReasoning: Record<string, ReasoningPart> = {}
+
+    const getOrCreateReasoningPart = (
+      itemId: string,
+      encryptedContent?: string | null
+    ): ReasoningPart => {
+      const activePart = activeReasoning[itemId]
+      if (Predicate.isNotUndefined(activePart)) {
+        if (Predicate.isNotNullish(encryptedContent)) {
+          activePart.encryptedContent = encryptedContent
+        }
+        return activePart
+      }
+
+      const reasoningPart: ReasoningPart = {
+        encryptedContent: Predicate.isNotNullish(encryptedContent) ? encryptedContent : undefined,
+        summaryParts: {}
+      }
+      activeReasoning[itemId] = reasoningPart
+      return reasoningPart
+    }
 
    // Track active tool calls with optional provider-specific state
    const activeToolCalls: Record<number, {
      readonly id: string
      readonly name: string
+      readonly functionCall?: {
+        emitted: boolean
+      }
      readonly applyPatch?: {
        hasDiff: boolean
        endEmitted: boolean
@@ -1399,6 +1481,10 @@ const makeStreamResponse = Effect.fnUntraced(
    Stream.mapEffect(Effect.fnUntraced(function*(event) {
      const parts: Array<Response.StreamPartEncoded> = []
 
+      if (!isKnownResponseStreamEvent(event)) {
+        return parts
+      }
+
      switch (event.type) {
        case "response.created": {
          const createdAt = new Date(event.response.created_at * 1000)
@@ -1428,7 +1514,7 @@ const makeStreamResponse = Effect.fnUntraced(
            ),
            usage: getUsage(event.response.usage),
            response: buildHttpResponseDetails(response),
-            ...(event.response.service_tier
+            ...toServiceTier(event.response.service_tier)
          })
          break
        }
@@ -1529,7 +1615,8 @@ const makeStreamResponse = Effect.fnUntraced(
            case "function_call": {
              activeToolCalls[event.output_index] = {
                id: event.item.call_id,
-                name: event.item.name
+                name: event.item.name,
+                functionCall: { emitted: false }
              }
              parts.push({
                type: "tool-params-start",
@@ -1566,34 +1653,33 @@ const makeStreamResponse = Effect.fnUntraced(
              parts.push({
                type: "text-start",
                id: event.item.id,
-                metadata: { openai:
+                metadata: { openai: makeItemIdMetadata(event.item.id) }
              })
              break
            }
 
            case "reasoning": {
-              const
-
-
-
-
-
-
-
-
-
-                ...makeEncryptedContentMetadata(event.item.encrypted_content)
+              const reasoningPart = getOrCreateReasoningPart(event.item.id, event.item.encrypted_content)
+              if (Predicate.isUndefined(reasoningPart.summaryParts[0])) {
+                reasoningPart.summaryParts[0] = "active"
+                parts.push({
+                  type: "reasoning-start",
+                  id: `${event.item.id}:0`,
+                  metadata: {
+                    openai: {
+                      ...makeItemIdMetadata(event.item.id),
+                      ...makeEncryptedContentMetadata(reasoningPart.encryptedContent)
+                    }
                  }
-                }
-              }
+                })
+              }
              break
            }
 
            case "shell_call": {
              const toolName = toolNameMapper.getCustomName("shell")
              activeToolCalls[event.output_index] = {
-                id: event.item.id,
+                id: event.item.id ?? event.item.call_id,
                name: toolName
              }
              break
@@ -1644,7 +1730,7 @@ const makeStreamResponse = Effect.fnUntraced(
                parts.push({
                  type: "tool-params-delta",
                  id: toolCall.id,
-                  delta: InternalUtilities.escapeJSONDelta(event.item.operation.diff)
+                  delta: InternalUtilities.escapeJSONDelta(event.item.operation.diff ?? "")
                })
              }
              parts.push({
@@ -1666,7 +1752,7 @@ const makeStreamResponse = Effect.fnUntraced(
                id: toolCall.id,
                name: toolName,
                params: { call_id: event.item.call_id, operation: event.item.operation },
-                metadata: { openai:
+                metadata: { openai: makeItemIdMetadata(event.item.id) }
              })
            }
            delete activeToolCalls[event.output_index]
@@ -1729,12 +1815,20 @@ const makeStreamResponse = Effect.fnUntraced(
            }
 
            case "function_call": {
+              const toolCall = activeToolCalls[event.output_index]
+              if (Predicate.isNotUndefined(toolCall?.functionCall?.emitted) && toolCall.functionCall.emitted) {
+                delete activeToolCalls[event.output_index]
+                break
+              }
              delete activeToolCalls[event.output_index]
+
              hasToolCalls = true
+
              const toolName = event.item.name
-              const
-
-
+              const toolArgs = event.item.arguments
+
+              const toolParams = yield* Effect.try({
+                try: () => Tool.unsafeSecureJsonParse(toolArgs),
                catch: (cause) =>
                  AiError.make({
                    module: "OpenAiLanguageModel",
@@ -1746,17 +1840,22 @@ const makeStreamResponse = Effect.fnUntraced(
                  })
                })
              })
+
+              const params = yield* transformToolCallParams(options.tools, toolName, toolParams)
+
              parts.push({
                type: "tool-params-end",
                id: event.item.call_id
              })
+
              parts.push({
                type: "tool-call",
                id: event.item.call_id,
                name: toolName,
                params,
-                metadata: { openai:
+                metadata: { openai: makeItemIdMetadata(event.item.id) }
              })
+
              break
            }
 
@@ -1780,7 +1879,7 @@ const makeStreamResponse = Effect.fnUntraced(
                id: event.item.call_id,
                name: toolName,
                params: { action: event.item.action },
-                metadata: { openai:
+                metadata: { openai: makeItemIdMetadata(event.item.id) }
              })
              break
            }
@@ -1794,13 +1893,17 @@ const makeStreamResponse = Effect.fnUntraced(
                  event.item.id)
                : event.item.id
 
-              const toolName =
+              const { toolName, params } = yield* normalizeMcpToolCall({
+                toolNameMapper,
+                toolParams: event.item.arguments,
+                method: "makeStreamResponse"
+              })
 
              parts.push({
                type: "tool-call",
                id: toolId,
                name: toolName,
-                params
+                params,
                providerExecuted: true
              })
 
@@ -1811,14 +1914,14 @@ const makeStreamResponse = Effect.fnUntraced(
                isFailure: false,
                providerExecuted: true,
                result: {
-                  type: "
+                  type: "mcp_call",
                  name: event.item.name,
                  arguments: event.item.arguments,
                  server_label: event.item.server_label,
                  ...(Predicate.isNotNullish(event.item.output) ? { output: event.item.output } : undefined),
                  ...(Predicate.isNotNullish(event.item.error) ? { error: event.item.error } : undefined)
                },
-                metadata: { openai:
+                metadata: { openai: makeItemIdMetadata(event.item.id) }
              })
 
              break
@@ -1833,12 +1936,16 @@ const makeStreamResponse = Effect.fnUntraced(
              const toolId = yield* idGenerator.generateId()
              const approvalRequestId = (event.item as any).approval_request_id ?? event.item.id
              streamApprovalRequests.set(approvalRequestId, toolId)
-              const toolName =
+              const { toolName, params } = yield* normalizeMcpToolCall({
+                toolNameMapper,
+                toolParams: event.item.arguments,
+                method: "makeStreamResponse"
+              })
              parts.push({
                type: "tool-call",
                id: toolId,
                name: toolName,
-                params
+                params,
                providerExecuted: true
              })
              parts.push({
@@ -1862,7 +1969,7 @@ const makeStreamResponse = Effect.fnUntraced(
            }
 
            case "reasoning": {
-              const reasoningPart =
+              const reasoningPart = getOrCreateReasoningPart(event.item.id, event.item.encrypted_content)
              for (const [summaryIndex, status] of Object.entries(reasoningPart.summaryParts)) {
                if (status === "active" || status === "can-conclude") {
                  parts.push({
@@ -1871,7 +1978,7 @@ const makeStreamResponse = Effect.fnUntraced(
                    metadata: {
                      openai: {
                        ...makeItemIdMetadata(event.item.id),
-                        ...makeEncryptedContentMetadata(
+                        ...makeEncryptedContentMetadata(reasoningPart.encryptedContent)
                      }
                    }
                  })
@@ -1886,10 +1993,10 @@ const makeStreamResponse = Effect.fnUntraced(
              const toolName = toolNameMapper.getCustomName("shell")
              parts.push({
                type: "tool-call",
-                id: event.item.id,
+                id: event.item.id ?? event.item.call_id,
                name: toolName,
                params: { action: event.item.action },
-                metadata: { openai:
+                metadata: { openai: makeItemIdMetadata(event.item.id) }
              })
              break
            }
@@ -1924,7 +2031,7 @@ const makeStreamResponse = Effect.fnUntraced(
        }
 
        case "response.output_text.annotation.added": {
-          const annotation = event.annotation as typeof
+          const annotation = event.annotation as typeof OpenAiSchema.Annotation.Encoded
          // Track annotation for text-end metadata
          activeAnnotations.push(annotation)
          if (annotation.type === "container_file_citation") {
@@ -2006,6 +2113,48 @@ const makeStreamResponse = Effect.fnUntraced(
          break
        }
 
+        case "response.function_call_arguments.done": {
+          const toolCall = activeToolCalls[event.output_index]
+          if (
+            Predicate.isNotUndefined(toolCall?.functionCall) &&
+            !toolCall.functionCall.emitted
+          ) {
+            hasToolCalls = true
+
+            const toolParams = yield* Effect.try({
+              try: () => Tool.unsafeSecureJsonParse(event.arguments),
+              catch: (cause) =>
+                AiError.make({
+                  module: "OpenAiLanguageModel",
+                  method: "makeStreamResponse",
+                  reason: new AiError.ToolParameterValidationError({
+                    toolName: toolCall.name,
+                    toolParams: {},
+                    description: `Failed securely JSON parse tool parameters: ${cause}`
+                  })
+                })
+            })
+
+            const params = yield* transformToolCallParams(options.tools, toolCall.name, toolParams)
+
+            parts.push({
+              type: "tool-params-end",
+              id: toolCall.id
+            })
+
+            parts.push({
+              type: "tool-call",
+              id: toolCall.id,
+              name: toolCall.name,
+              params,
+              metadata: { openai: makeItemIdMetadata(event.item_id) }
+            })
+
+            toolCall.functionCall.emitted = true
+          }
+          break
+        }
+
        case "response.apply_patch_call_operation_diff.delta": {
          const toolCall = activeToolCalls[event.output_index]
          if (Predicate.isNotUndefined(toolCall?.applyPatch)) {
@@ -2095,28 +2244,28 @@ const makeStreamResponse = Effect.fnUntraced(
        }
 
        case "response.reasoning_summary_part.added": {
-
+          const reasoningPart = getOrCreateReasoningPart(event.item_id)
          if (event.summary_index > 0) {
-
-
-
-
-
-
-
-
-
-
-                ...makeItemIdMetadata(event.item_id),
-                ...makeEncryptedContentMetadata(reasoningPart.encryptedContent)
-              }
+            // Conclude all can-conclude parts before starting new one
+            for (const [summaryIndex, status] of Object.entries(reasoningPart.summaryParts)) {
+              if (status === "can-conclude") {
+                parts.push({
+                  type: "reasoning-end",
+                  id: `${event.item_id}:${summaryIndex}`,
+                  metadata: {
+                    openai: {
+                      ...makeItemIdMetadata(event.item_id),
+                      ...makeEncryptedContentMetadata(reasoningPart.encryptedContent)
                    }
-            }
-
-
+                  }
+                })
+                reasoningPart.summaryParts[Number(summaryIndex)] = "concluded"
              }
-            reasoningPart.summaryParts[event.summary_index] = "active"
            }
+          }
+
+          if (Predicate.isUndefined(reasoningPart.summaryParts[event.summary_index])) {
+            reasoningPart.summaryParts[event.summary_index] = "active"
            parts.push({
              type: "reasoning-start",
              id: `${event.item_id}:${event.summary_index}`,
@@ -2136,26 +2285,27 @@ const makeStreamResponse = Effect.fnUntraced(
            type: "reasoning-delta",
            id: `${event.item_id}:${event.summary_index}`,
            delta: event.delta,
-            metadata: { openai:
+            metadata: { openai: makeItemIdMetadata(event.item_id) }
          })
          break
        }
 
        case "response.reasoning_summary_part.done": {
+          const reasoningPart = getOrCreateReasoningPart(event.item_id)
          // When OpenAI stores message data, we can immediately conclude the
          // reasoning part given that we do not need the encrypted content
          if (config.store === true) {
            parts.push({
              type: "reasoning-end",
              id: `${event.item_id}:${event.summary_index}`,
-              metadata: { openai:
+              metadata: { openai: makeItemIdMetadata(event.item_id) }
            })
            // Mark the summary part concluded
-
+            reasoningPart.summaryParts[event.summary_index] = "concluded"
          } else {
            // Mark the summary part as can-conclude given we still need a
            // final summary part with the encrypted content
-
+            reasoningPart.summaryParts[event.summary_index] = "can-conclude"
          }
          break
        }
@@ -2174,7 +2324,7 @@
 
 const annotateRequest = (
   span: Span,
-  request: typeof
+  request: typeof OpenAiSchema.CreateResponse.Encoded
 ): void => {
   addGenAIAnnotations(span, {
     system: "openai",
@@ -2194,7 +2344,7 @@ const annotateRequest = (
   })
 }
 
-const annotateResponse = (span: Span, response:
+const annotateResponse = (span: Span, response: OpenAiSchema.Response): void => {
   const finishReason = response.incomplete_details?.reason as string | undefined
   addGenAIAnnotations(span, {
     response: {
@@ -2244,7 +2394,7 @@ const annotateStreamResponse = (span: Span, part: Response.StreamPartEncoded) =>
 // Tool Conversion
 // =============================================================================
 
-type OpenAiToolChoice = typeof
+type OpenAiToolChoice = typeof OpenAiSchema.CreateResponse.Encoded["tool_choice"]
 
 const prepareTools = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Tool.Any>>({
   config,
@@ -2255,7 +2405,7 @@ const prepareTools = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Too
   readonly options: LanguageModel.ProviderOptions
   readonly toolNameMapper: Tool.NameMapper<Tools>
 }): Effect.fn.Return<{
-  readonly tools: ReadonlyArray<typeof
+  readonly tools: ReadonlyArray<typeof OpenAiSchema.Tool.Encoded> | undefined
   readonly toolChoice: OpenAiToolChoice | undefined
 }, AiError.AiError> {
   // Return immediately if no tools are in the toolkit
@@ -2263,7 +2413,7 @@ const prepareTools = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Too
     return { tools: undefined, toolChoice: undefined }
   }
 
-  const tools: Array<typeof
+  const tools: Array<typeof OpenAiSchema.Tool.Encoded> = []
   let toolChoice: OpenAiToolChoice | undefined = undefined
 
   // Filter the incoming tools down to the set of allowed tools as indicated by
@@ -2279,14 +2429,16 @@ const prepareTools = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Too
 
   // Convert the tools in the toolkit to the provider-defined format
   for (const tool of allowedTools) {
-    if (Tool.isUserDefined(tool)) {
+    if (Tool.isUserDefined(tool) || Tool.isDynamic(tool)) {
      const strict = Tool.getStrictMode(tool) ?? config.strictJsonSchema ?? true
+      const description = Tool.getDescription(tool)
+      const parameters = yield* tryToolJsonSchema(tool, "prepareTools")
      tools.push({
        type: "function",
        name: tool.name,
-
-
-
+        parameters,
+        strict,
+        ...(Predicate.isNotUndefined(description) ? { description } : undefined)
      })
    }
 
@@ -2468,35 +2620,63 @@ const getStatus = (
   | Prompt.TextPart
   | Prompt.ToolCallPart
   | Prompt.ToolResultPart
-): typeof
+): typeof OpenAiSchema.MessageStatus.Encoded | null => part.options.openai?.status ?? null
 const getEncryptedContent = (
   part: Prompt.ReasoningPart
 ): string | null => part.options.openai?.encryptedContent ?? null
 
 const getImageDetail = (part: Prompt.FilePart): ImageDetail => part.options.openai?.imageDetail ?? "auto"
 
-const makeItemIdMetadata = (itemId: string | undefined) => Predicate.isNotUndefined(itemId) ? { itemId } :
+const makeItemIdMetadata = (itemId: string | undefined) => Predicate.isNotUndefined(itemId) ? { itemId } : {}
 
 const makeEncryptedContentMetadata = (encryptedContent: string | null | undefined) =>
   Predicate.isNotNullish(encryptedContent) ? { encryptedContent } : undefined
 
-const
+const unsupportedSchemaError = (error: unknown, method: string): AiError.AiError =>
+  AiError.make({
+    module: "OpenAiLanguageModel",
+    method,
+    reason: new AiError.UnsupportedSchemaError({
+      description: error instanceof Error ? error.message : String(error)
+    })
+  })
+
+const tryCodecTransform = <S extends Schema.Top>(schema: S, method: string) =>
+  Effect.try({
+    try: () => toCodecOpenAI(schema),
+    catch: (error) => unsupportedSchemaError(error, method)
+  })
+
+const tryJsonSchema = <S extends Schema.Top>(schema: S, method: string) =>
+  Effect.try({
+    try: () => Tool.getJsonSchemaFromSchema(schema, { transformer: toCodecOpenAI }),
+    catch: (error) => unsupportedSchemaError(error, method)
+  })
+
+const tryToolJsonSchema = <T extends Tool.Any>(tool: T, method: string) =>
+  Effect.try({
+    try: () => Tool.getJsonSchema(tool, { transformer: toCodecOpenAI }),
+    catch: (error) => unsupportedSchemaError(error, method)
+  })
+
+const prepareResponseFormat = Effect.fnUntraced(function*({ config, options }: {
   readonly config: typeof Config.Service
   readonly options: LanguageModel.ProviderOptions
-}): typeof
+}): Effect.fn.Return<typeof OpenAiSchema.TextResponseFormatConfiguration.Encoded, AiError.AiError> {
   if (options.responseFormat.type === "json") {
     const name = options.responseFormat.objectName
     const schema = options.responseFormat.schema
+    const jsonSchema = yield* tryJsonSchema(schema, "prepareResponseFormat")
     return {
       type: "json_schema",
       name,
       description: AST.resolveDescription(schema.ast) ?? "Response with a JSON object",
-      schema:
+      schema: jsonSchema,
       strict: config.strictJsonSchema ?? true
     }
   }
   return { type: "text" }
-}
+})
 
 interface ModelCapabilities {
   readonly isReasoningModel: boolean
@@ -2570,7 +2750,42 @@ const getApprovalRequestIdMapping = (prompt: Prompt.Prompt): ReadonlyMap<string,
   return mapping
 }
 
-const
+const normalizeMcpToolCall = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Tool.Any>>({
+  toolNameMapper,
+  toolParams,
+  method
+}: {
+  readonly toolNameMapper: Tool.NameMapper<Tools>
+  readonly toolParams: unknown
+  readonly method: string
+}): Effect.fn.Return<{
+  readonly toolName: string
+  readonly params: unknown
+}, AiError.AiError> {
+  const toolName = toolNameMapper.getCustomName("mcp")
+
+  if (typeof toolParams !== "string") {
+    return { toolName, params: toolParams }
+  }
+
+  const params = yield* Effect.try({
+    try: () => Tool.unsafeSecureJsonParse(toolParams),
+    catch: (cause) =>
+      AiError.make({
+        module: "OpenAiLanguageModel",
+        method,
+        reason: new AiError.ToolParameterValidationError({
+          toolName,
+          toolParams,
+          description: `Failed to securely JSON parse tool parameters: ${cause}`
+        })
+      })
+  })
+
+  return { toolName, params }
+})
+
+const getUsage = (usage: OpenAiSchema.ResponseUsage | null | undefined): Response.Usage => {
   if (Predicate.isNullish(usage)) {
     return {
       inputTokens: {
@@ -2589,8 +2804,8 @@ const getUsage = (usage: Generated.ResponseUsage | null | undefined): Response.U
 
   const inputTokens = usage.input_tokens
   const outputTokens = usage.output_tokens
-  const cachedTokens = usage.input_tokens_details
-  const reasoningTokens = usage.output_tokens_details
+  const cachedTokens = getUsageTokenDetail(usage.input_tokens_details, "cached_tokens")
+  const reasoningTokens = getUsageTokenDetail(usage.output_tokens_details, "reasoning_tokens")
 
   return {
     inputTokens: {
@@ -2606,3 +2821,64 @@ const getUsage = (usage: Generated.ResponseUsage | null | undefined): Response.U
     }
   }
 }
+
+type ServiceTier = "default" | "auto" | "flex" | "scale" | "priority" | null
+
+const toServiceTier = (value: string | undefined): {
+  readonly metadata: {
+    readonly openai: {
+      readonly serviceTier: ServiceTier
+    }
+  }
+} | undefined => {
+  switch (value) {
+    case "default":
+    case "auto":
+    case "flex":
+    case "scale":
+    case "priority":
+      return { metadata: { openai: { serviceTier: value } } }
+    default:
+      return undefined
+  }
+}
+
+const getUsageTokenDetail = (details: unknown, key: string): number =>
+  Predicate.hasProperty(details, key) && typeof details[key] === "number" ? details[key] : 0
+
+const transformToolCallParams = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Tool.Any>>(
+  tools: Tools,
+  toolName: string,
+  toolParams: unknown
+): Effect.fn.Return<unknown, AiError.AiError> {
+  const tool = tools.find((tool) => tool.name === toolName)
+
+  if (Predicate.isUndefined(tool)) {
+    return yield* AiError.make({
+      module: "OpenAiLanguageModel",
+      method: "makeResponse",
+      reason: new AiError.ToolNotFoundError({
+        toolName,
+        availableTools: tools.map((tool) => tool.name)
+      })
+    })
+  }
+
+  const { codec } = yield* tryCodecTransform(tool.parametersSchema, "makeResponse")
+
+  const transform = Schema.decodeEffect(codec)
+
+  return yield* (
+    transform(toolParams) as Effect.Effect<unknown, Schema.SchemaError>
+  ).pipe(Effect.mapError((error) =>
+    AiError.make({
+      module: "OpenAiLanguageModel",
+      method: "makeResponse",
+      reason: new AiError.ToolParameterValidationError({
+        toolName,
+        toolParams,
+        description: error.issue.toString()
+      })
+    })
+  ))
+})