@lota-sdk/core 0.4.37 → 0.4.39

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lota-sdk/core",
-  "version": "0.4.37",
+  "version": "0.4.39",
   "files": [
     "src",
     "infrastructure/schema"
@@ -32,7 +32,7 @@
     "@ai-sdk/provider": "^3.0.9",
     "@chat-adapter/slack": "^4.26.0",
     "@chat-adapter/state-ioredis": "^4.26.0",
-    "@lota-sdk/shared": "0.4.37",
+    "@lota-sdk/shared": "0.4.39",
     "@mendable/firecrawl-js": "^4.20.0",
     "@surrealdb/node": "^3.0.3",
     "ai": "^6.0.170",
@@ -8,6 +8,7 @@ import { Cause, Clock, Context, Duration, Effect, Fiber, Layer, Semaphore } from
 import { DEFAULT_AI_GATEWAY_URL } from '../config/constants'
 import { ERROR_TAGS, AiGenerationError, ConfigurationError } from '../effect/errors'
 import { RuntimeConfigServiceTag } from '../effect/services'
+import { openRouterEmbeddingModel } from '../embeddings/openrouter'
 import { isRecord, readString } from '../utils/string'
 import { buildAiGatewayCacheHeaders } from './cache-headers'
 
@@ -1319,7 +1320,7 @@ export function aiGatewayEmbeddingModel(modelId: string, deps?: AiGatewayDeps) {
 export type AiGatewayModels = {
   model(modelId: string): ReturnType<typeof aiGatewayModel>
   chatModel(modelId: string): ReturnType<typeof aiGatewayChatModel>
-  embeddingModel(modelId: string): ReturnType<typeof aiGatewayEmbeddingModel>
+  embeddingModel(modelId: string): ReturnType<typeof openRouterEmbeddingModel>
   openRouterResponseHealingModel(modelId: string): ReturnType<typeof aiGatewayOpenRouterResponseHealingModel>
 }
 
@@ -1327,7 +1328,7 @@ export function createAiGatewayModels(deps: AiGatewayDeps): AiGatewayModels {
   return {
     model: (modelId: string) => aiGatewayModel(modelId, deps),
     chatModel: (modelId: string) => aiGatewayChatModel(modelId, deps),
-    embeddingModel: (modelId: string) => aiGatewayEmbeddingModel(modelId, deps),
+    embeddingModel: (modelId: string) => openRouterEmbeddingModel(modelId),
     openRouterResponseHealingModel: (modelId: string) => aiGatewayOpenRouterResponseHealingModel(modelId, deps),
   }
 }
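
With this hunk, the factory's embeddingModel() entry no longer goes through the AI Gateway: its return type and implementation both come from the new openRouterEmbeddingModel helper added below, and the gateway deps are no longer threaded into embeddings. A minimal sketch of what a caller sees, assuming AiGatewayModels stays re-exported from the package root as the index barrel suggests:

    // Sketch only: embeddings now resolve through OpenRouter, so this requires
    // OPENROUTER_API_KEY in the environment; the gateway deps are not used here.
    import { embed } from 'ai'
    import type { AiGatewayModels } from '@lota-sdk/core'

    async function embedText(models: AiGatewayModels, text: string): Promise<number[]> {
      const { embedding } = await embed({
        model: models.embeddingModel('openai/text-embedding-3-small'),
        value: text,
      })
      return embedding
    }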
@@ -0,0 +1,46 @@
+import { createOpenAI } from '@ai-sdk/openai'
+
+import { ConfigurationError } from '../effect/errors'
+
+const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1'
+const OPENROUTER_MODEL_PREFIX = 'openrouter/' as const
+const OPENAI_TEXT_EMBEDDING_3_SMALL_MODEL_ID = 'openai/text-embedding-3-small'
+const OPENROUTER_API_KEY_ENV = 'OPENROUTER_API_KEY'
+
+type OpenRouterProvider = ReturnType<typeof createOpenAI>
+
+let cachedProvider: { apiKey: string; provider: OpenRouterProvider } | null = null
+
+function normalizeOpenRouterEmbeddingModelId(modelId: string): string {
+  const normalized = modelId.trim()
+  if (normalized === 'text-embedding-3-small') return OPENAI_TEXT_EMBEDDING_3_SMALL_MODEL_ID
+  return normalized.startsWith(OPENROUTER_MODEL_PREFIX) ? normalized.slice(OPENROUTER_MODEL_PREFIX.length) : normalized
+}
+
+function readOpenRouterApiKey(env: Record<string, string | undefined> = process.env): string {
+  const apiKey = env[OPENROUTER_API_KEY_ENV]?.trim()
+  if (!apiKey) {
+    throw new ConfigurationError({
+      message: `[embeddings-provider] ${OPENROUTER_API_KEY_ENV} is required for direct OpenRouter embeddings.`,
+      key: OPENROUTER_API_KEY_ENV,
+    })
+  }
+  return apiKey
+}
+
+function getOpenRouterProvider(apiKey: string): OpenRouterProvider {
+  if (cachedProvider?.apiKey === apiKey) return cachedProvider.provider
+
+  const provider = createOpenAI({ apiKey, baseURL: OPENROUTER_BASE_URL })
+  cachedProvider = { apiKey, provider }
+  return provider
+}
+
+export function openRouterEmbeddingModel(modelId: string) {
+  const normalizedModelId = normalizeOpenRouterEmbeddingModelId(modelId)
+  if (!normalizedModelId) {
+    throw new ConfigurationError({ message: '[embeddings-provider] Model id is required.', key: 'embeddingModelId' })
+  }
+
+  return getOpenRouterProvider(readOpenRouterApiKey()).embeddingModel(normalizedModelId)
+}
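
The new file above is the entire OpenRouter embeddings surface: it reads OPENROUTER_API_KEY from the environment, caches one provider client per key, strips an optional 'openrouter/' prefix, and maps the bare id 'text-embedding-3-small' to 'openai/text-embedding-3-small'. A usage sketch under those rules, using only ids named in this file and relying on the root re-export added later in this diff; it throws ConfigurationError if the key is missing or the id is empty:

    // Sketch: all three ids below normalize to the same OpenRouter model.
    import { embed } from 'ai'
    import { openRouterEmbeddingModel } from '@lota-sdk/core'

    async function demo(): Promise<number> {
      const model = openRouterEmbeddingModel('text-embedding-3-small')
      // Equivalent after normalization:
      //   openRouterEmbeddingModel('openai/text-embedding-3-small')
      //   openRouterEmbeddingModel('openrouter/openai/text-embedding-3-small')
      const { embedding } = await embed({ model, value: 'hello from 0.4.39' })
      return embedding.length
    }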
@@ -1,10 +1,11 @@
 import { embed, embedMany } from 'ai'
 import { Schema, Effect } from 'effect'
 
-import { aiGatewayEmbeddingModel } from '../ai-gateway/ai-gateway'
 import { ERROR_TAGS, ConfigurationError } from '../effect/errors'
+import { openRouterEmbeddingModel } from './openrouter'
 
 const SUPPORTED_EMBEDDING_PREFIXES = ['openai/', 'openrouter/'] as const
+const SUPPORTED_BARE_EMBEDDING_MODEL_IDS = ['text-embedding-3-small'] as const
 
 type SharedEmbeddingCache = {
   get(model: string, text: string): Promise<number[] | null>
@@ -30,14 +31,18 @@ function resolveEmbeddingModel(modelId: string) {
     throw new ConfigurationError({ message: '[embeddings-provider] Model id is required.', key: 'embeddingModelId' })
   }
 
+  if (SUPPORTED_BARE_EMBEDDING_MODEL_IDS.includes(normalized as (typeof SUPPORTED_BARE_EMBEDDING_MODEL_IDS)[number])) {
+    return openRouterEmbeddingModel(`openai/${normalized}`)
+  }
+
   if (!SUPPORTED_EMBEDDING_PREFIXES.some((prefix) => normalized.startsWith(prefix))) {
     throw new ConfigurationError({
-      message: `[embeddings-provider] Unsupported model id "${modelId}". Use one of: ${SUPPORTED_EMBEDDING_PREFIXES.join(', ')}*.`,
+      message: `[embeddings-provider] Unsupported model id "${modelId}". Use one of: ${SUPPORTED_EMBEDDING_PREFIXES.join(', ')}* or ${SUPPORTED_BARE_EMBEDDING_MODEL_IDS.join(', ')}.`,
       key: 'embeddingModelId',
     })
   }
 
-  return aiGatewayEmbeddingModel(normalized)
+  return openRouterEmbeddingModel(normalized)
 }
 
 function normalizeEmbedding(embedding: readonly number[]): number[] {
@@ -106,7 +111,10 @@ export class ProviderEmbeddings {
 
     const promise = this.runPromise(this.executeEmbedQueryEffect(input))
     this.inflightEmbeddings.set(dedupKey, promise)
-    void promise.finally(() => this.inflightEmbeddings.delete(dedupKey))
+    void promise.then(
+      () => this.inflightEmbeddings.delete(dedupKey),
+      () => this.inflightEmbeddings.delete(dedupKey),
+    )
 
     return promise
   }
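
A plausible reason for the hunk above: finally() returns a derived promise that re-rejects when the original rejects, and because that derived promise is void-ed, a failed embedding would surface an unhandled-rejection warning even when the caller handled the returned promise. The two-argument then() runs the same cleanup on both outcomes and never lets the derived promise reject. A standalone sketch of the semantics (plain Promise behaviour, not the package's code):

    // Standalone sketch: run with node/tsx to compare the two cleanup patterns.
    const failing: Promise<void> = Promise.reject(new Error('embed failed'))

    // Old pattern: finally() returns a derived promise that re-rejects, and void-ing it
    // leaves that rejection unhandled even though the original promise is caught below.
    // void failing.finally(() => cleanup())

    // New pattern: both callbacks run the cleanup, so the derived promise never rejects.
    void failing.then(
      () => cleanup(),
      () => cleanup(),
    )

    failing.catch(() => { /* the caller still handles the original rejection */ })

    function cleanup(): void {
      console.log('removed from inflight map')
    }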
package/src/index.ts CHANGED
@@ -4,6 +4,7 @@ export * from './ai-gateway'
 export * from './config'
 export * from './db'
 export * from './document'
+export * from './embeddings/openrouter'
 export * from './queues'
 export * from './redis'
 export * from './runtime'
@@ -291,6 +291,7 @@ export const LOTA_RUNTIME_ENV_KEYS = Object.freeze([
   'REDIS_URL',
   'AI_GATEWAY_URL',
   'AI_GATEWAY_KEY',
+  'OPENROUTER_API_KEY',
   'AI_EMBEDDING_MODEL',
   'AI_GATEWAY_MAX_CONCURRENCY',
   'S3_ENDPOINT',
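
OPENROUTER_API_KEY joins the frozen runtime key list above; per the new embeddings module it is required whenever an embedding model is created, because embeddings now call OpenRouter directly rather than the AI Gateway. A small pre-flight check one might add when upgrading (a sketch; only the key named in this diff is checked):

    // Sketch: fail fast at startup if the newly required key is missing.
    const openRouterKey = process.env.OPENROUTER_API_KEY?.trim()
    if (!openRouterKey) {
      throw new Error('OPENROUTER_API_KEY must be set to use @lota-sdk/core embeddings (0.4.39)')
    }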