@lota-sdk/core 0.4.18 → 0.4.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -2
- package/src/ai-gateway/ai-gateway.ts +2 -25
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@lota-sdk/core",
|
|
3
|
-
"version": "0.4.18",
|
|
3
|
+
"version": "0.4.19",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"main": "./src/index.ts",
|
|
6
6
|
"types": "./src/index.ts",
|
|
@@ -31,7 +31,7 @@
|
|
|
31
31
|
"@ai-sdk/openai": "^3.0.53",
|
|
32
32
|
"@chat-adapter/slack": "^4.26.0",
|
|
33
33
|
"@chat-adapter/state-ioredis": "^4.26.0",
|
|
34
|
-
"@lota-sdk/shared": "0.4.18",
|
|
34
|
+
"@lota-sdk/shared": "0.4.19",
|
|
35
35
|
"@mendable/firecrawl-js": "^4.18.3",
|
|
36
36
|
"@surrealdb/node": "^3.0.3",
|
|
37
37
|
"ai": "^6.0.168",
|
|
@@ -21,7 +21,6 @@ type AiGatewayGenerateResult = Awaited<ReturnType<WrapStreamOptions['doGenerate'
|
|
|
21
21
|
type AiGatewayStreamResult = Awaited<ReturnType<WrapStreamOptions['doStream']>>
|
|
22
22
|
type AiGatewayGeneratedContent = AiGatewayGenerateResult['content'][number]
|
|
23
23
|
type AiGatewayStreamPart = AiGatewayStreamResult['stream'] extends ReadableStream<infer T> ? T : never
|
|
24
|
-
type AiGatewayProviderOptions = NonNullable<AiGatewayCallOptions['providerOptions']>
|
|
25
24
|
type AiGatewayAttemptResult<A> = { source: string; result: A }
|
|
26
25
|
// eslint-disable-next-line @typescript-eslint/no-redundant-type-constituents
|
|
27
26
|
type AiGatewayRunFork = <A, E>(effect: Effect.Effect<A, E, never>) => Fiber.Fiber<A, E | unknown>
|
|
@@ -867,10 +866,6 @@ function executeStreamAttemptPlan(
|
|
|
867
866
|
)
|
|
868
867
|
}
|
|
869
868
|
|
|
870
|
-
function isOpenRouterOpenAIReasoningModel(modelId: string): boolean {
|
|
871
|
-
return modelId.trim().toLowerCase().startsWith('openrouter/openai/gpt-5')
|
|
872
|
-
}
|
|
873
|
-
|
|
874
869
|
function shouldCloseInjectedReasoning(chunk: AiGatewayStreamPart): boolean {
|
|
875
870
|
switch (chunk.type) {
|
|
876
871
|
case 'stream-start':
|
|
@@ -1052,27 +1047,9 @@ function createAiGatewayLanguageModelMiddleware(
|
|
|
1052
1047
|
|
|
1053
1048
|
export function normalizeAiGatewayChatProviderOptions(
|
|
1054
1049
|
params: AiGatewayCallOptions,
|
|
1055
|
-
|
|
1050
|
+
_modelId?: string,
|
|
1056
1051
|
): AiGatewayCallOptions {
|
|
1057
|
-
|
|
1058
|
-
? ({ ...params.providerOptions } as AiGatewayProviderOptions)
|
|
1059
|
-
: ({} as AiGatewayProviderOptions)
|
|
1060
|
-
const openaiOptions = isRecord(providerOptions.openai)
|
|
1061
|
-
? { ...providerOptions.openai }
|
|
1062
|
-
: ({} as Record<string, unknown>)
|
|
1063
|
-
|
|
1064
|
-
if (modelId && isOpenRouterOpenAIReasoningModel(modelId) && openaiOptions.forceReasoning === undefined) {
|
|
1065
|
-
openaiOptions.forceReasoning = true
|
|
1066
|
-
}
|
|
1067
|
-
|
|
1068
|
-
if (providerOptions.openai === openaiOptions || Object.keys(openaiOptions).length === 0) {
|
|
1069
|
-
return params
|
|
1070
|
-
}
|
|
1071
|
-
|
|
1072
|
-
return {
|
|
1073
|
-
...params,
|
|
1074
|
-
providerOptions: { ...providerOptions, openai: openaiOptions as AiGatewayProviderOptions['openai'] },
|
|
1075
|
-
}
|
|
1052
|
+
return params
|
|
1076
1053
|
}
|
|
1077
1054
|
|
|
1078
1055
|
function withAiGatewayDevTools<TModel extends AiGatewayLanguageModel>(model: TModel): TModel {
|