@effect/ai-openai-compat 4.0.0-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/OpenAiClient.d.ts +739 -0
- package/dist/OpenAiClient.d.ts.map +1 -0
- package/dist/OpenAiClient.js +170 -0
- package/dist/OpenAiClient.js.map +1 -0
- package/dist/OpenAiConfig.d.ts +47 -0
- package/dist/OpenAiConfig.d.ts.map +1 -0
- package/dist/OpenAiConfig.js +25 -0
- package/dist/OpenAiConfig.js.map +1 -0
- package/dist/OpenAiError.d.ts +93 -0
- package/dist/OpenAiError.d.ts.map +1 -0
- package/dist/OpenAiError.js +5 -0
- package/dist/OpenAiError.js.map +1 -0
- package/dist/OpenAiLanguageModel.d.ts +285 -0
- package/dist/OpenAiLanguageModel.d.ts.map +1 -0
- package/dist/OpenAiLanguageModel.js +1223 -0
- package/dist/OpenAiLanguageModel.js.map +1 -0
- package/dist/OpenAiTelemetry.d.ts +120 -0
- package/dist/OpenAiTelemetry.d.ts.map +1 -0
- package/dist/OpenAiTelemetry.js +35 -0
- package/dist/OpenAiTelemetry.js.map +1 -0
- package/dist/index.d.ts +35 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +36 -0
- package/dist/index.js.map +1 -0
- package/dist/internal/errors.d.ts +2 -0
- package/dist/internal/errors.d.ts.map +1 -0
- package/dist/internal/errors.js +286 -0
- package/dist/internal/errors.js.map +1 -0
- package/dist/internal/utilities.d.ts +2 -0
- package/dist/internal/utilities.d.ts.map +1 -0
- package/dist/internal/utilities.js +25 -0
- package/dist/internal/utilities.js.map +1 -0
- package/package.json +62 -0
- package/src/OpenAiClient.ts +998 -0
- package/src/OpenAiConfig.ts +64 -0
- package/src/OpenAiError.ts +102 -0
- package/src/OpenAiLanguageModel.ts +1638 -0
- package/src/OpenAiTelemetry.ts +159 -0
- package/src/index.ts +41 -0
- package/src/internal/errors.ts +327 -0
- package/src/internal/utilities.ts +33 -0
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI telemetry attributes for OpenTelemetry integration.
|
|
3
|
+
*
|
|
4
|
+
* Provides OpenAI-specific GenAI telemetry attributes following OpenTelemetry
|
|
5
|
+
* semantic conventions, extending the base GenAI attributes with OpenAI-specific
|
|
6
|
+
* request and response metadata.
|
|
7
|
+
*
|
|
8
|
+
* @since 1.0.0
|
|
9
|
+
*/
|
|
10
|
+
import { dual } from "effect/Function"
|
|
11
|
+
import * as String from "effect/String"
|
|
12
|
+
import type { Span } from "effect/Tracer"
|
|
13
|
+
import type { Simplify } from "effect/Types"
|
|
14
|
+
import * as Telemetry from "effect/unstable/ai/Telemetry"
|
|
15
|
+
|
|
16
|
+
/**
|
|
17
|
+
* The attributes used to describe telemetry in the context of Generative
|
|
18
|
+
* Artificial Intelligence (GenAI) Models requests and responses.
|
|
19
|
+
*
|
|
20
|
+
* {@see https://opentelemetry.io/docs/specs/semconv/attributes-registry/gen-ai/}
|
|
21
|
+
*
|
|
22
|
+
* @since 1.0.0
|
|
23
|
+
* @category models
|
|
24
|
+
*/
|
|
25
|
+
export type OpenAiTelemetryAttributes = Simplify<
|
|
26
|
+
& Telemetry.GenAITelemetryAttributes
|
|
27
|
+
& Telemetry.AttributesWithPrefix<RequestAttributes, "gen_ai.openai.request">
|
|
28
|
+
& Telemetry.AttributesWithPrefix<ResponseAttributes, "gen_ai.openai.request">
|
|
29
|
+
>
|
|
30
|
+
|
|
31
|
+
/**
 * All telemetry attributes which are part of the GenAI specification,
 * including the OpenAi-specific attributes.
 *
 * Unlike {@link OpenAiTelemetryAttributes}, the members here carry their
 * plain (un-prefixed) property names.
 *
 * @since 1.0.0
 * @category models
 */
export type AllAttributes = Telemetry.AllAttributes & RequestAttributes & ResponseAttributes
|
|
39
|
+
|
|
40
|
+
/**
 * Telemetry attributes which are part of the GenAI specification and are
 * namespaced by `gen_ai.openai.request`.
 *
 * Property names are camelCase here and are converted to snake_case
 * attribute keys when written to a span (see `String.camelToSnake` usage
 * in this module).
 *
 * @since 1.0.0
 * @category models
 */
export interface RequestAttributes {
  /**
   * The response format that is requested.
   *
   * The `(string & {})` member preserves editor autocompletion for the
   * well-known values while still admitting arbitrary custom strings.
   */
  readonly responseFormat?: (string & {}) | WellKnownResponseFormat | null | undefined
  /**
   * The service tier requested. May be a specific tier, `default`, or `auto`.
   */
  readonly serviceTier?: (string & {}) | WellKnownServiceTier | null | undefined
}
|
|
57
|
+
|
|
58
|
+
/**
 * Telemetry attributes which are part of the GenAI specification and are
 * namespaced by `gen_ai.openai.response`.
 *
 * Property names are camelCase here and are converted to snake_case
 * attribute keys when written to a span.
 *
 * @since 1.0.0
 * @category models
 */
export interface ResponseAttributes {
  /**
   * The service tier used for the response.
   */
  readonly serviceTier?: string | null | undefined
  /**
   * A fingerprint to track any eventual change in the Generative AI
   * environment.
   */
  readonly systemFingerprint?: string | null | undefined
}
|
|
76
|
+
|
|
77
|
+
/**
 * The `gen_ai.openai.request.response_format` attribute has the following
 * list of well-known values.
 *
 * If one of them applies, then the respective value **MUST** be used;
 * otherwise, a custom value **MAY** be used.
 *
 * @see {@link RequestAttributes.responseFormat}
 *
 * @since 1.0.0
 * @category models
 */
export type WellKnownResponseFormat = "json_object" | "json_schema" | "text"
|
|
88
|
+
|
|
89
|
+
/**
 * The `gen_ai.openai.request.service_tier` attribute has the following
 * list of well-known values.
 *
 * If one of them applies, then the respective value **MUST** be used;
 * otherwise, a custom value **MAY** be used.
 *
 * @see {@link RequestAttributes.serviceTier}
 *
 * @since 1.0.0
 * @category models
 */
export type WellKnownServiceTier = "auto" | "default"
|
|
100
|
+
|
|
101
|
+
/**
 * Options accepted by {@link addGenAIAnnotations}: the base GenAI attribute
 * options plus an optional `openai` group carrying the OpenAI-specific
 * request and response attributes.
 *
 * @since 1.0.0
 * @category models
 */
export type OpenAiTelemetryAttributeOptions = Telemetry.GenAITelemetryAttributeOptions & {
  openai?: {
    request?: RequestAttributes | undefined
    response?: ResponseAttributes | undefined
  } | undefined
}
|
|
111
|
+
|
|
112
|
+
// Span-attribute writers for the OpenAI-specific attributes. Each one writes
// its attributes under the given namespace prefix, converting the camelCase
// property names to snake_case attribute keys.
const addOpenAiRequestAttributes = Telemetry.addSpanAttributes("gen_ai.openai.request", String.camelToSnake)<
  RequestAttributes
>
const addOpenAiResponseAttributes = Telemetry.addSpanAttributes("gen_ai.openai.response", String.camelToSnake)<
  ResponseAttributes
>
|
|
118
|
+
|
|
119
|
+
/**
 * Applies the specified OpenAi GenAI telemetry attributes to the provided
 * `Span`.
 *
 * **NOTE**: This method will mutate the `Span` **in-place**.
 *
 * @since 1.0.0
 * @category utilities
 */
export const addGenAIAnnotations: {
  /**
   * Applies the specified OpenAi GenAI telemetry attributes to the provided
   * `Span` (data-last overload).
   *
   * **NOTE**: This method will mutate the `Span` **in-place**.
   *
   * @since 1.0.0
   * @category utilities
   */
  (options: OpenAiTelemetryAttributeOptions): (span: Span) => void
  /**
   * Applies the specified OpenAi GenAI telemetry attributes to the provided
   * `Span` (data-first overload).
   *
   * **NOTE**: This method will mutate the `Span` **in-place**.
   *
   * @since 1.0.0
   * @category utilities
   */
  (span: Span, options: OpenAiTelemetryAttributeOptions): void
} = dual(2, (span: Span, options: OpenAiTelemetryAttributeOptions) => {
  // Write the base GenAI attributes first, then layer on the
  // OpenAI-specific request/response namespaces when provided.
  Telemetry.addGenAIAnnotations(span, options)
  if (options.openai != null) {
    if (options.openai.request != null) {
      addOpenAiRequestAttributes(span, options.openai.request)
    }
    if (options.openai.response != null) {
      addOpenAiResponseAttributes(span, options.openai.response)
    }
  }
})
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/**
 * @since 1.0.0
 */

// @barrel: Auto-generated exports. Do not edit manually.

/**
 * Re-exports the `OpenAiClient` module.
 *
 * @since 1.0.0
 */
export * as OpenAiClient from "./OpenAiClient.ts"

/**
 * Re-exports the `OpenAiConfig` module.
 *
 * @since 1.0.0
 */
export * as OpenAiConfig from "./OpenAiConfig.ts"

/**
 * Re-exports the `OpenAiError` module.
 *
 * @since 1.0.0
 */
export * as OpenAiError from "./OpenAiError.ts"

/**
 * OpenAI Language Model implementation.
 *
 * Provides a LanguageModel implementation for OpenAI's chat completions API,
 * supporting text generation, structured output, tool calling, and streaming.
 *
 * @since 1.0.0
 */
export * as OpenAiLanguageModel from "./OpenAiLanguageModel.ts"

/**
 * OpenAI telemetry attributes for OpenTelemetry integration.
 *
 * Provides OpenAI-specific GenAI telemetry attributes following OpenTelemetry
 * semantic conventions, extending the base GenAI attributes with OpenAI-specific
 * request and response metadata.
 *
 * @since 1.0.0
 */
export * as OpenAiTelemetry from "./OpenAiTelemetry.ts"
|
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
import * as Arr from "effect/Array"
|
|
2
|
+
import * as Duration from "effect/Duration"
|
|
3
|
+
import * as Effect from "effect/Effect"
|
|
4
|
+
import { dual } from "effect/Function"
|
|
5
|
+
import * as Number from "effect/Number"
|
|
6
|
+
import * as Option from "effect/Option"
|
|
7
|
+
import * as Redactable from "effect/Redactable"
|
|
8
|
+
import * as Schema from "effect/Schema"
|
|
9
|
+
import * as SchemaTransformation from "effect/SchemaTransformation"
|
|
10
|
+
import * as String from "effect/String"
|
|
11
|
+
import * as AiError from "effect/unstable/ai/AiError"
|
|
12
|
+
import type * as Response from "effect/unstable/ai/Response"
|
|
13
|
+
import type * as HttpClientError from "effect/unstable/http/HttpClientError"
|
|
14
|
+
import type * as HttpClientRequest from "effect/unstable/http/HttpClientRequest"
|
|
15
|
+
import type * as HttpClientResponse from "effect/unstable/http/HttpClientResponse"
|
|
16
|
+
import type { OpenAiErrorMetadata } from "../OpenAiError.ts"
|
|
17
|
+
|
|
18
|
+
/** @internal */
// Schema for the OpenAI error envelope: `{ "error": { message, type, ... } }`.
export const OpenAiErrorBody = Schema.Struct({
  error: Schema.Struct({
    message: Schema.String,
    type: Schema.optional(Schema.NullOr(Schema.String)),
    // Some payloads carry a "status" field in place of "type" — both are
    // accepted and folded together downstream (see mapStatusCodeError).
    status: Schema.optional(Schema.NullOr(Schema.String)),
    param: Schema.optional(Schema.NullOr(Schema.String)),
    // "code" may be a string or a number; callers normalize via toString().
    code: Schema.optional(Schema.NullOr(Schema.Union([Schema.String, Schema.Number])))
  })
})
// Decodes a raw JSON string into a single error envelope. Accepts either one
// envelope object or a non-empty array of envelopes; in the array case only
// the first entry is kept (encoding wraps it back into a singleton array).
const OpenAiErrorBodyJson = Schema.decodeUnknownOption(Schema.fromJsonString(Schema.Union([
  OpenAiErrorBody,
  Schema.NonEmptyArray(OpenAiErrorBody).pipe(
    Schema.decodeTo(
      Schema.toType(OpenAiErrorBody),
      SchemaTransformation.transform({
        decode: Arr.headNonEmpty,
        encode: (item) => [item]
      })
    )
  )
])))
|
|
40
|
+
|
|
41
|
+
/** @internal */
// Converts a schema decoding failure into the library-level `AiError`,
// attributed to the `OpenAiClient` module and the given method name.
// `dual` makes it callable both data-first and data-last (curried).
export const mapSchemaError = dual<
  (method: string) => (error: Schema.SchemaError) => AiError.AiError,
  (error: Schema.SchemaError, method: string) => AiError.AiError
>(2, (error, method) =>
  AiError.make({
    module: "OpenAiClient",
    method,
    reason: AiError.InvalidOutputError.fromSchemaError(error)
  }))
|
|
51
|
+
|
|
52
|
+
/** @internal */
|
|
53
|
+
export const mapHttpClientError = dual<
|
|
54
|
+
(method: string) => (error: HttpClientError.HttpClientError) => Effect.Effect<never, AiError.AiError>,
|
|
55
|
+
(error: HttpClientError.HttpClientError, method: string) => Effect.Effect<never, AiError.AiError>
|
|
56
|
+
>(2, (error, method) => {
|
|
57
|
+
const reason = error.reason
|
|
58
|
+
switch (reason._tag) {
|
|
59
|
+
case "TransportError": {
|
|
60
|
+
return Effect.fail(AiError.make({
|
|
61
|
+
module: "OpenAiClient",
|
|
62
|
+
method,
|
|
63
|
+
reason: new AiError.NetworkError({
|
|
64
|
+
reason: "TransportError",
|
|
65
|
+
description: reason.description,
|
|
66
|
+
request: buildHttpRequestDetails(reason.request)
|
|
67
|
+
})
|
|
68
|
+
}))
|
|
69
|
+
}
|
|
70
|
+
case "EncodeError": {
|
|
71
|
+
return Effect.fail(AiError.make({
|
|
72
|
+
module: "OpenAiClient",
|
|
73
|
+
method,
|
|
74
|
+
reason: new AiError.NetworkError({
|
|
75
|
+
reason: "EncodeError",
|
|
76
|
+
description: reason.description,
|
|
77
|
+
request: buildHttpRequestDetails(reason.request)
|
|
78
|
+
})
|
|
79
|
+
}))
|
|
80
|
+
}
|
|
81
|
+
case "InvalidUrlError": {
|
|
82
|
+
return Effect.fail(AiError.make({
|
|
83
|
+
module: "OpenAiClient",
|
|
84
|
+
method,
|
|
85
|
+
reason: new AiError.NetworkError({
|
|
86
|
+
reason: "InvalidUrlError",
|
|
87
|
+
description: reason.description,
|
|
88
|
+
request: buildHttpRequestDetails(reason.request)
|
|
89
|
+
})
|
|
90
|
+
}))
|
|
91
|
+
}
|
|
92
|
+
case "StatusCodeError": {
|
|
93
|
+
return mapStatusCodeError(reason, method)
|
|
94
|
+
}
|
|
95
|
+
case "DecodeError": {
|
|
96
|
+
return Effect.fail(AiError.make({
|
|
97
|
+
module: "OpenAiClient",
|
|
98
|
+
method,
|
|
99
|
+
reason: new AiError.InvalidOutputError({
|
|
100
|
+
description: reason.description ?? "Failed to decode response"
|
|
101
|
+
})
|
|
102
|
+
}))
|
|
103
|
+
}
|
|
104
|
+
case "EmptyBodyError": {
|
|
105
|
+
return Effect.fail(AiError.make({
|
|
106
|
+
module: "OpenAiClient",
|
|
107
|
+
method,
|
|
108
|
+
reason: new AiError.InvalidOutputError({
|
|
109
|
+
description: reason.description ?? "Response body was empty"
|
|
110
|
+
})
|
|
111
|
+
}))
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
})
|
|
115
|
+
|
|
116
|
+
/** @internal */
// Maps an HTTP status-code failure to an `AiError`, first attempting to read
// the response body and decode the OpenAI error envelope out of it.
const mapStatusCodeError = Effect.fnUntraced(function*(
  error: HttpClientError.StatusCodeError,
  method: string
) {
  const { request, response, description } = error
  const status = response.status
  const headers = response.headers as Record<string, string>
  const requestId = headers["x-request-id"]

  // Prefer the actual response body; if reading it fails, fall back to the
  // error description when it looks like a JSON payload, otherwise undefined.
  let body = yield* response.text.pipe(
    Effect.catchCause(() => Effect.succeed(description?.startsWith("{") ? description : undefined))
  )
  // Option.none() when the body is absent or not a recognizable envelope.
  const decoded = OpenAiErrorBodyJson(body)

  const reason = mapStatusCodeToReason({
    status,
    headers,
    message: Option.isSome(decoded) ? decoded.value.error.message : undefined,
    http: buildHttpContext({ request, response, body }),
    metadata: {
      // Normalize the string-or-number error code to a string (or null).
      errorCode: Option.isSome(decoded) ? decoded.value.error.code?.toString() ?? null : null,
      // Fall back to the envelope's "status" field when "type" is absent;
      // lower-cased so downstream substring checks (e.g. the quota detection
      // in mapStatusCodeToReason) are case-insensitive.
      errorType: decoded.pipe(
        Option.flatMapNullishOr((d) => d.error.type ?? d.error.status),
        Option.map(String.toLowerCase),
        Option.getOrNull
      ),
      requestId: requestId ?? null
    }
  })

  return yield* AiError.make({ module: "OpenAiClient", method, reason })
})
|
|
149
|
+
|
|
150
|
+
/** @internal */
|
|
151
|
+
export const parseRateLimitHeaders = (headers: Record<string, string>) => {
|
|
152
|
+
const retryAfterRaw = headers["retry-after"]
|
|
153
|
+
let retryAfter: Duration.Duration | undefined
|
|
154
|
+
if (retryAfterRaw !== undefined) {
|
|
155
|
+
const parsed = Number.parse(retryAfterRaw)
|
|
156
|
+
if (parsed !== undefined) {
|
|
157
|
+
retryAfter = Duration.seconds(parsed)
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
const remainingRaw = headers["x-ratelimit-remaining-requests"]
|
|
161
|
+
const remaining = remainingRaw !== undefined ? Number.parse(remainingRaw) ?? null : null
|
|
162
|
+
return {
|
|
163
|
+
retryAfter,
|
|
164
|
+
limit: headers["x-ratelimit-limit-requests"] ?? null,
|
|
165
|
+
remaining,
|
|
166
|
+
resetRequests: headers["x-ratelimit-reset-requests"] ?? null,
|
|
167
|
+
resetTokens: headers["x-ratelimit-reset-tokens"] ?? null
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/** @internal */
// Captures the salient parts of an outgoing request for error reporting.
// Headers are passed through `Redactable.redact` so sensitive values are not
// attached verbatim to error values.
export const buildHttpRequestDetails = (
  request: HttpClientRequest.HttpClientRequest
): typeof Response.HttpRequestDetails.Type => ({
  method: request.method,
  url: request.url,
  urlParams: Array.from(request.urlParams),
  hash: request.hash,
  headers: Redactable.redact(request.headers) as Record<string, string>
})
|
|
181
|
+
|
|
182
|
+
/** @internal */
|
|
183
|
+
export const buildHttpContext = (params: {
|
|
184
|
+
readonly request: HttpClientRequest.HttpClientRequest
|
|
185
|
+
readonly response?: HttpClientResponse.HttpClientResponse
|
|
186
|
+
readonly body?: string | undefined
|
|
187
|
+
}): typeof AiError.HttpContext.Type => ({
|
|
188
|
+
request: buildHttpRequestDetails(params.request),
|
|
189
|
+
response: params.response !== undefined
|
|
190
|
+
? {
|
|
191
|
+
status: params.response.status,
|
|
192
|
+
headers: Redactable.redact(params.response.headers) as Record<string, string>
|
|
193
|
+
}
|
|
194
|
+
: undefined,
|
|
195
|
+
body: params.body
|
|
196
|
+
})
|
|
197
|
+
|
|
198
|
+
const buildInvalidRequestDescription = (params: {
|
|
199
|
+
readonly status: number
|
|
200
|
+
readonly message: string | undefined
|
|
201
|
+
readonly method: string
|
|
202
|
+
readonly url: string
|
|
203
|
+
readonly errorCode: string | null
|
|
204
|
+
readonly errorType: string | null
|
|
205
|
+
readonly requestId: string | null
|
|
206
|
+
readonly body: string | undefined
|
|
207
|
+
}): string => {
|
|
208
|
+
const parts: Array<string> = []
|
|
209
|
+
|
|
210
|
+
if (params.message) {
|
|
211
|
+
parts.push(params.message)
|
|
212
|
+
} else {
|
|
213
|
+
parts.push(`HTTP ${params.status}`)
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
parts.push(`(${params.method} ${params.url})`)
|
|
217
|
+
|
|
218
|
+
if (params.errorCode) {
|
|
219
|
+
parts.push(`[code: ${params.errorCode}]`)
|
|
220
|
+
} else if (params.errorType) {
|
|
221
|
+
parts.push(`[type: ${params.errorType}]`)
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
if (params.requestId) {
|
|
225
|
+
parts.push(`[requestId: ${params.requestId}]`)
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
if (!params.message && params.body) {
|
|
229
|
+
const truncated = params.body.length > 200
|
|
230
|
+
? params.body.slice(0, 200) + "..."
|
|
231
|
+
: params.body
|
|
232
|
+
parts.push(`Response: ${truncated}`)
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
return parts.join(" ")
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
/** @internal */
|
|
239
|
+
export const mapStatusCodeToReason = ({ status, headers, message, metadata, http }: {
|
|
240
|
+
readonly status: number
|
|
241
|
+
readonly headers: Record<string, string>
|
|
242
|
+
readonly message: string | undefined
|
|
243
|
+
readonly metadata: OpenAiErrorMetadata
|
|
244
|
+
readonly http: typeof AiError.HttpContext.Type
|
|
245
|
+
}): AiError.AiErrorReason => {
|
|
246
|
+
const invalidRequestDescription = buildInvalidRequestDescription({
|
|
247
|
+
status,
|
|
248
|
+
message,
|
|
249
|
+
method: http.request.method,
|
|
250
|
+
url: http.request.url,
|
|
251
|
+
errorCode: metadata.errorCode,
|
|
252
|
+
errorType: metadata.errorType,
|
|
253
|
+
requestId: metadata.requestId,
|
|
254
|
+
body: http.body
|
|
255
|
+
})
|
|
256
|
+
|
|
257
|
+
switch (status) {
|
|
258
|
+
case 400:
|
|
259
|
+
return new AiError.InvalidRequestError({
|
|
260
|
+
description: invalidRequestDescription,
|
|
261
|
+
metadata: { openai: metadata },
|
|
262
|
+
http
|
|
263
|
+
})
|
|
264
|
+
case 401:
|
|
265
|
+
return new AiError.AuthenticationError({
|
|
266
|
+
kind: "InvalidKey",
|
|
267
|
+
metadata,
|
|
268
|
+
http
|
|
269
|
+
})
|
|
270
|
+
case 403:
|
|
271
|
+
return new AiError.AuthenticationError({
|
|
272
|
+
kind: "InsufficientPermissions",
|
|
273
|
+
metadata,
|
|
274
|
+
http
|
|
275
|
+
})
|
|
276
|
+
case 404:
|
|
277
|
+
return new AiError.InvalidRequestError({
|
|
278
|
+
description: invalidRequestDescription,
|
|
279
|
+
metadata: { openai: metadata },
|
|
280
|
+
http
|
|
281
|
+
})
|
|
282
|
+
case 409:
|
|
283
|
+
case 422:
|
|
284
|
+
return new AiError.InvalidRequestError({
|
|
285
|
+
description: invalidRequestDescription,
|
|
286
|
+
metadata: { openai: metadata },
|
|
287
|
+
http
|
|
288
|
+
})
|
|
289
|
+
case 429: {
|
|
290
|
+
if (
|
|
291
|
+
metadata.errorCode === "insufficient_quota" ||
|
|
292
|
+
metadata.errorType === "insufficient_quota" ||
|
|
293
|
+
metadata.errorType?.includes("quota") ||
|
|
294
|
+
metadata.errorType?.includes("exhausted")
|
|
295
|
+
) {
|
|
296
|
+
return new AiError.QuotaExhaustedError({
|
|
297
|
+
metadata: { openai: metadata },
|
|
298
|
+
http
|
|
299
|
+
})
|
|
300
|
+
}
|
|
301
|
+
const { retryAfter, ...rateLimitMetadata } = parseRateLimitHeaders(headers)
|
|
302
|
+
return new AiError.RateLimitError({
|
|
303
|
+
retryAfter,
|
|
304
|
+
metadata: {
|
|
305
|
+
openai: {
|
|
306
|
+
...metadata,
|
|
307
|
+
...rateLimitMetadata
|
|
308
|
+
}
|
|
309
|
+
},
|
|
310
|
+
http
|
|
311
|
+
})
|
|
312
|
+
}
|
|
313
|
+
default:
|
|
314
|
+
if (status >= 500) {
|
|
315
|
+
return new AiError.InternalProviderError({
|
|
316
|
+
description: message ?? "Server error",
|
|
317
|
+
metadata,
|
|
318
|
+
http
|
|
319
|
+
})
|
|
320
|
+
}
|
|
321
|
+
return new AiError.UnknownError({
|
|
322
|
+
description: message,
|
|
323
|
+
metadata,
|
|
324
|
+
http
|
|
325
|
+
})
|
|
326
|
+
}
|
|
327
|
+
}
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import type * as Response from "effect/unstable/ai/Response"
|
|
2
|
+
|
|
3
|
+
/** @internal */
// Key used to namespace OpenAI provider-specific request options
// (presumably read by OpenAiLanguageModel — confirm against that module).
export const ProviderOptionsKey = "@effect/ai-openai-compat/OpenAiLanguageModel/ProviderOptions"

/** @internal */
// Key used to namespace OpenAI provider-specific response metadata.
export const ProviderMetadataKey = "@effect/ai-openai-compat/OpenAiLanguageModel/ProviderMetadata"
|
|
8
|
+
|
|
9
|
+
// Maps OpenAI `finish_reason` values onto the provider-agnostic
// `Response.FinishReason` vocabulary. The legacy `function_call` termination
// is treated identically to `tool_calls`.
const finishReasonMap: Record<string, Response.FinishReason> = {
  content_filter: "content-filter",
  function_call: "tool-calls",
  length: "length",
  stop: "stop",
  tool_calls: "tool-calls"
}
|
|
16
|
+
|
|
17
|
+
/** @internal */
|
|
18
|
+
export const escapeJSONDelta = (delta: string): string => JSON.stringify(delta).slice(1, -1)
|
|
19
|
+
|
|
20
|
+
/** @internal */
|
|
21
|
+
export const resolveFinishReason = (
|
|
22
|
+
finishReason: string | null | undefined,
|
|
23
|
+
hasToolCalls: boolean
|
|
24
|
+
): Response.FinishReason => {
|
|
25
|
+
if (finishReason == null) {
|
|
26
|
+
return hasToolCalls ? "tool-calls" : "stop"
|
|
27
|
+
}
|
|
28
|
+
const reason = finishReasonMap[finishReason]
|
|
29
|
+
if (reason == null) {
|
|
30
|
+
return hasToolCalls ? "tool-calls" : "unknown"
|
|
31
|
+
}
|
|
32
|
+
return reason
|
|
33
|
+
}
|