@lota-sdk/core 0.1.17 → 0.1.19

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lota-sdk/core",
-   "version": "0.1.17",
+   "version": "0.1.19",
    "type": "module",
    "main": "./src/index.ts",
    "types": "./src/index.ts",
@@ -32,7 +32,7 @@
    "@chat-adapter/slack": "^4.23.0",
    "@chat-adapter/state-ioredis": "^4.23.0",
    "@logtape/logtape": "^2.0.5",
-   "@lota-sdk/shared": "0.1.17",
+   "@lota-sdk/shared": "0.1.19",
    "@mendable/firecrawl-js": "^4.17.0",
    "@surrealdb/node": "^3.0.3",
    "ai": "^6.0.137",
@@ -0,0 +1,431 @@
+ import { devToolsMiddleware } from '@ai-sdk/devtools'
+ import { createOpenAI } from '@ai-sdk/openai'
+ import { wrapLanguageModel } from 'ai'
+ import type { LanguageModelMiddleware } from 'ai'
+
+ import { getRuntimeConfig } from '../runtime/runtime-config'
+ import { isRecord, readString } from '../utils/string'
+ import { buildAiGatewayCacheHeaders } from './cache-headers'
+
+ type AiGatewayLanguageModel = Parameters<typeof wrapLanguageModel>[0]['model']
+ type AiGatewayExtraParams = Record<string, unknown>
+ type AiGatewayChatResponse = { body?: unknown }
+ type AiGatewayTransformParamsOptions = Parameters<NonNullable<LanguageModelMiddleware['transformParams']>>[0]
+ type WrapStreamOptions = Parameters<NonNullable<LanguageModelMiddleware['wrapStream']>>[0]
+ type AiGatewayCallOptions = WrapStreamOptions['params']
+ type AiGatewayGenerateResult = Awaited<ReturnType<WrapStreamOptions['doGenerate']>>
+ type AiGatewayStreamResult = Awaited<ReturnType<WrapStreamOptions['doStream']>>
+ type AiGatewayGeneratedContent = AiGatewayGenerateResult['content'][number]
+ type AiGatewayStreamPart = AiGatewayStreamResult['stream'] extends ReadableStream<infer T> ? T : never
+ type AiGatewayConfig = { apiKey: string; baseURL: string }
+ type AiGatewayProviderOptions = NonNullable<AiGatewayCallOptions['providerOptions']>
+
+ const EXPECTED_GATEWAY_KEY_PREFIX = 'sk-bf-'
+ const AI_GATEWAY_VIRTUAL_KEY_HEADER = 'x-bf-vk'
+ const AI_GATEWAY_EXTRA_PARAMS_HEADER = 'x-bf-passthrough-extra-params'
+ const DEFAULT_AI_GATEWAY_URL = 'https://ai-gateway.gobrainy.ai' as const
+ const OPENROUTER_RESPONSE_HEALING_EXTRA_PARAMS = {
+   plugins: [{ id: 'response-healing' }],
+ } as const satisfies AiGatewayExtraParams
+
+ function toAiGatewayCacheKeyPart(value: string): string {
+   const normalized = value
+     .trim()
+     .toLowerCase()
+     .replace(/[^a-z0-9:_-]+/g, '-')
+     .replace(/-+/g, '-')
+   return normalized.replace(/^-+|-+$/g, '') || 'request'
+ }
+
+ function mergeAiGatewayHeaders(
+   existingHeaders: AiGatewayCallOptions['headers'] | undefined,
+   additionalHeaders: Record<string, string>,
+ ): Record<string, string> {
+   const merged = new Headers(existingHeaders as HeadersInit | undefined)
+   for (const [key, value] of Object.entries(additionalHeaders)) {
+     if (!merged.has(key)) {
+       merged.set(key, value)
+     }
+   }
+   return Object.fromEntries(merged.entries())
+ }
+
+ function withDefaultAiGatewayCacheHeaders(params: AiGatewayCallOptions, modelId: string): AiGatewayCallOptions {
+   return {
+     ...params,
+     headers: mergeAiGatewayHeaders(
+       params.headers,
+       buildAiGatewayCacheHeaders(`model:${toAiGatewayCacheKeyPart(modelId)}`),
+     ),
+   }
+ }
+
+ function normalizeAiGatewayUrl(value: string): string {
+   const trimmed = value.trim()
+   if (!trimmed) {
+     throw new Error('[ai-gateway] AI gateway URL is required.')
+   }
+
+   const normalized = trimmed.replace(/\/+$/, '')
+   return normalized.endsWith('/v1') ? normalized : `${normalized}/v1`
+ }
+
+ function readDirectEnvAiGatewayConfig(): AiGatewayConfig {
+   const apiKey = (process.env.AI_GATEWAY_KEY ?? '').trim()
+   if (!apiKey) {
+     throw new Error(
+       '[ai-gateway] Missing AI gateway key. Set AI_GATEWAY_KEY, or configure createLotaRuntime({ aiGateway: { key } }).',
+     )
+   }
+
+   return { apiKey, baseURL: normalizeAiGatewayUrl(process.env.AI_GATEWAY_URL?.trim() || DEFAULT_AI_GATEWAY_URL) }
+ }
+
+ function readAiGatewayConfig(): AiGatewayConfig {
+   try {
+     const { aiGateway } = getRuntimeConfig()
+     return { apiKey: aiGateway.key.trim(), baseURL: normalizeAiGatewayUrl(aiGateway.url) }
+   } catch {
+     return readDirectEnvAiGatewayConfig()
+   }
+ }
+
+ function readReasoningDetailsText(value: unknown): string | null {
+   if (!Array.isArray(value)) return null
+
+   const textParts = value
+     .map((item) => (isRecord(item) ? readString(item.text) : null))
+     .filter((item): item is string => item !== null)
+
+   if (textParts.length === 0) return null
+
+   return textParts.join('\n\n')
+ }
+
+ function readReasoningDeltaText(value: unknown): string | null {
+   return typeof value === 'string' && value.length > 0 ? value : null
+ }
+
+ function readAiGatewayChatReasoningText(message: Record<string, unknown>): string | null {
+   return (
+     readString(message.reasoning) ??
+     readString(message.reasoning_content) ??
+     readReasoningDetailsText(message.reasoning_details)
+   )
+ }
+
+ export function extractAiGatewayChatReasoningText(responseBody: unknown): string | null {
+   if (!isRecord(responseBody) || !Array.isArray(responseBody.choices)) return null
+
+   for (const choice of responseBody.choices) {
+     if (!isRecord(choice) || !isRecord(choice.message)) continue
+
+     const reasoningText = readAiGatewayChatReasoningText(choice.message)
+     if (reasoningText) return reasoningText
+   }
+
+   return null
+ }
+
+ export function extractAiGatewayChatReasoningDeltaText(rawChunk: unknown): string | null {
+   if (!isRecord(rawChunk) || !Array.isArray(rawChunk.choices)) return null
+
+   for (const choice of rawChunk.choices) {
+     if (!isRecord(choice) || !isRecord(choice.delta)) continue
+
+     const reasoningText =
+       readReasoningDeltaText(choice.delta.reasoning) ??
+       readReasoningDeltaText(choice.delta.reasoning_content) ??
+       readReasoningDetailsText(choice.delta.reasoning_details)
+     if (reasoningText) return reasoningText
+   }
+
+   return null
+ }
+
+ type AiGatewayResponsesReasoningDelta = { id: string; delta: string; itemId: string }
+
+ export function extractAiGatewayResponsesReasoningDelta(rawChunk: unknown): AiGatewayResponsesReasoningDelta | null {
+   if (!isRecord(rawChunk) || rawChunk.type !== 'response.reasoning_summary_text.delta') return null
+   if ('summary_index' in rawChunk) return null
+
+   const itemId = readString(rawChunk.item_id)
+   const delta = readReasoningDeltaText(rawChunk.delta)
+   if (!itemId || !delta) return null
+
+   return { id: `${itemId}:0`, delta, itemId }
+ }
+
+ export function injectAiGatewayChatReasoningContent(
+   content: readonly AiGatewayGeneratedContent[],
+   response?: AiGatewayChatResponse,
+ ): AiGatewayGeneratedContent[] {
+   if (content.some((part) => part.type === 'reasoning')) {
+     return [...content]
+   }
+
+   const reasoningText = extractAiGatewayChatReasoningText(response?.body)
+   if (!reasoningText) return [...content]
+
+   return [{ type: 'reasoning', text: reasoningText }, ...content]
+ }
+
+ function isReasoningEnabled(params: AiGatewayCallOptions): boolean {
+   if (!isRecord(params.providerOptions) || !isRecord(params.providerOptions.openai)) return false
+
+   const openaiOptions = params.providerOptions.openai
+   if (openaiOptions.forceReasoning === true) return true
+   if (typeof openaiOptions.reasoningSummary === 'string' && openaiOptions.reasoningSummary.length > 0) return true
+   return typeof openaiOptions.reasoningEffort === 'string' && openaiOptions.reasoningEffort !== 'none'
+ }
+
+ function shouldCloseInjectedReasoning(chunk: AiGatewayStreamPart): boolean {
+   return chunk.type !== 'stream-start' && chunk.type !== 'response-metadata' && chunk.type !== 'raw'
+ }
+
+ export function injectAiGatewayChatReasoningStream(
+   stream: ReadableStream<AiGatewayStreamPart>,
+ ): ReadableStream<AiGatewayStreamPart> {
+   const reasoningId = 'ai-gateway-reasoning-0'
+   let reasoningOpen = false
+   let reasoningClosed = false
+
+   return stream.pipeThrough(
+     new TransformStream<AiGatewayStreamPart, AiGatewayStreamPart>({
+       transform(chunk, controller) {
+         const closeReasoning = () => {
+           if (!reasoningOpen || reasoningClosed) return
+
+           controller.enqueue({ type: 'reasoning-end', id: reasoningId } satisfies AiGatewayStreamPart)
+           reasoningClosed = true
+         }
+
+         if (chunk.type === 'raw') {
+           const reasoningDelta = reasoningClosed ? null : extractAiGatewayChatReasoningDeltaText(chunk.rawValue)
+           controller.enqueue(chunk)
+
+           if (reasoningDelta) {
+             if (!reasoningOpen) {
+               controller.enqueue({ type: 'reasoning-start', id: reasoningId } satisfies AiGatewayStreamPart)
+               reasoningOpen = true
+             }
+
+             controller.enqueue({
+               type: 'reasoning-delta',
+               id: reasoningId,
+               delta: reasoningDelta,
+             } satisfies AiGatewayStreamPart)
+           }
+           return
+         }
+
+         if (shouldCloseInjectedReasoning(chunk)) {
+           closeReasoning()
+         }
+
+         controller.enqueue(chunk)
+       },
+       flush(controller) {
+         if (!reasoningOpen || reasoningClosed) return
+         controller.enqueue({ type: 'reasoning-end', id: reasoningId } satisfies AiGatewayStreamPart)
+       },
+     }),
+   )
+ }
+
+ export function injectAiGatewayResponsesReasoningStream(
+   stream: ReadableStream<AiGatewayStreamPart>,
+ ): ReadableStream<AiGatewayStreamPart> {
+   return stream.pipeThrough(
+     new TransformStream<AiGatewayStreamPart, AiGatewayStreamPart>({
+       transform(chunk, controller) {
+         controller.enqueue(chunk)
+
+         if (chunk.type !== 'raw') return
+
+         const reasoningDelta = extractAiGatewayResponsesReasoningDelta(chunk.rawValue)
+         if (!reasoningDelta) return
+
+         controller.enqueue({
+           type: 'reasoning-delta',
+           id: reasoningDelta.id,
+           delta: reasoningDelta.delta,
+           providerMetadata: { openai: { itemId: reasoningDelta.itemId } },
+         } satisfies AiGatewayStreamPart)
+       },
+     }),
+   )
+ }
+
+ function addAiGatewayReasoningRawChunks(
+   params: AiGatewayCallOptions,
+   type: AiGatewayTransformParamsOptions['type'],
+ ): AiGatewayCallOptions {
+   if (type !== 'stream' || !isReasoningEnabled(params) || params.includeRawChunks === true) {
+     return params
+   }
+
+   return { ...params, includeRawChunks: true }
+ }
+
+ export function normalizeAiGatewayChatProviderOptions(params: AiGatewayCallOptions): AiGatewayCallOptions {
+   const providerOptions = isRecord(params.providerOptions)
+     ? ({ ...params.providerOptions } as AiGatewayProviderOptions)
+     : ({} as AiGatewayProviderOptions)
+   const openaiOptions = isRecord(providerOptions.openai)
+     ? { ...providerOptions.openai }
+     : ({} as Record<string, unknown>)
+
+   if (openaiOptions.systemMessageMode === 'system') {
+     return params
+   }
+
+   return {
+     ...params,
+     providerOptions: {
+       ...providerOptions,
+       openai: {
+         ...openaiOptions,
+         ...(openaiOptions.systemMessageMode === 'remove' ? {} : { systemMessageMode: 'system' }),
+       },
+     },
+   }
+ }
+
+ export function injectAiGatewayExtraParamsRequestBody(
+   body: BodyInit | null | undefined,
+   extraParams: AiGatewayExtraParams,
+ ): BodyInit | null | undefined {
+   if (typeof body !== 'string') return body
+
+   let parsed: unknown
+   try {
+     parsed = JSON.parse(body)
+   } catch {
+     return body
+   }
+
+   if (!isRecord(parsed)) return body
+
+   const mergedExtraParams = isRecord(parsed.extra_params)
+     ? { ...parsed.extra_params, ...extraParams }
+     : { ...extraParams }
+
+   return JSON.stringify({ ...parsed, extra_params: mergedExtraParams })
+ }
+
+ function createAiGatewayFetchWithExtraParams(extraParams: AiGatewayExtraParams): typeof fetch {
+   const fetchWithExtraParams = (input: RequestInfo | URL, init?: RequestInit | BunFetchRequestInit) =>
+     globalThis.fetch(input, { ...init, body: injectAiGatewayExtraParamsRequestBody(init?.body, extraParams) })
+
+   return Object.assign(fetchWithExtraParams, { preconnect: globalThis.fetch.preconnect.bind(globalThis.fetch) })
+ }
+
+ function createAiGatewayProvider(extraParams?: AiGatewayExtraParams) {
+   const { apiKey, baseURL } = readAiGatewayConfig()
+   if (!apiKey.startsWith(EXPECTED_GATEWAY_KEY_PREFIX)) {
+     throw new Error(`[ai-gateway] Gateway keys must use the ${EXPECTED_GATEWAY_KEY_PREFIX}* format.`)
+   }
+
+   return createOpenAI({
+     baseURL,
+     apiKey,
+     headers: {
+       [AI_GATEWAY_VIRTUAL_KEY_HEADER]: apiKey,
+       ...(extraParams ? { [AI_GATEWAY_EXTRA_PARAMS_HEADER]: 'true' } : {}),
+     },
+     ...(extraParams ? { fetch: createAiGatewayFetchWithExtraParams(extraParams) } : {}),
+   })
+ }
+
+ function withAiGatewayDevTools<TModel extends AiGatewayLanguageModel>(model: TModel): TModel {
+   return wrapLanguageModel({ model, middleware: devToolsMiddleware() }) as TModel
+ }
+
+ let provider: ReturnType<typeof createOpenAI> | null = null
+ let openRouterResponseHealingProvider: ReturnType<typeof createOpenAI> | null = null
+
+ export function getAiGatewayProvider() {
+   if (provider) return provider
+
+   provider = createAiGatewayProvider()
+
+   return provider
+ }
+
+ export function getAiGatewayOpenRouterResponseHealingProvider() {
+   if (openRouterResponseHealingProvider) return openRouterResponseHealingProvider
+
+   openRouterResponseHealingProvider = createAiGatewayProvider(OPENROUTER_RESPONSE_HEALING_EXTRA_PARAMS)
+
+   return openRouterResponseHealingProvider
+ }
+
+ export function aiGatewayModel(modelId: string) {
+   return withAiGatewayDevTools(
+     wrapLanguageModel({
+       model: getAiGatewayProvider()(modelId),
+       middleware: {
+         specificationVersion: 'v3',
+         transformParams: async ({ params, type }) =>
+           withDefaultAiGatewayCacheHeaders(addAiGatewayReasoningRawChunks(params, type), modelId),
+         wrapStream: async ({ doStream, params }) => {
+           const result = await doStream()
+           if (!isReasoningEnabled(params)) return result
+
+           return { ...result, stream: injectAiGatewayResponsesReasoningStream(result.stream) }
+         },
+       },
+     }),
+   )
+ }
+
+ export function aiGatewayOpenRouterResponseHealingModel(modelId: string) {
+   return withAiGatewayDevTools(
+     wrapLanguageModel({
+       model: getAiGatewayOpenRouterResponseHealingProvider()(modelId),
+       middleware: {
+         specificationVersion: 'v3',
+         transformParams: async ({ params }) => withDefaultAiGatewayCacheHeaders(params, modelId),
+       },
+     }),
+   )
+ }
+
+ export function aiGatewayChatModel(modelId: string) {
+   return withAiGatewayDevTools(
+     wrapLanguageModel({
+       model: getAiGatewayProvider().chat(modelId),
+       middleware: {
+         specificationVersion: 'v3',
+         transformParams: async ({ params, type }) =>
+           normalizeAiGatewayChatProviderOptions(
+             withDefaultAiGatewayCacheHeaders(addAiGatewayReasoningRawChunks(params, type), modelId),
+           ),
+         wrapGenerate: async ({ doGenerate }) => {
+           const result = await doGenerate()
+
+           return {
+             ...result,
+             content: injectAiGatewayChatReasoningContent(
+               result.content,
+               result.response as AiGatewayChatResponse | undefined,
+             ),
+           }
+         },
+         wrapStream: async ({ doStream, params }) => {
+           const result = await doStream()
+           if (!isReasoningEnabled(params)) return result
+
+           return { ...result, stream: injectAiGatewayChatReasoningStream(result.stream) }
+         },
+       },
+     }),
+   )
+ }
+
+ export function aiGatewayEmbeddingModel(modelId: string) {
+   return getAiGatewayProvider().embeddingModel(modelId)
+ }
+
+ export { DEFAULT_AI_GATEWAY_URL, normalizeAiGatewayUrl }
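A minimal usage sketch for the helpers added above (not part of the package): it assumes AI_GATEWAY_KEY holds an 'sk-bf-' virtual key, optionally AI_GATEWAY_URL, and that 'openai/gpt-4o-mini' is a model id the gateway routes — none of those values appear in this diff.

// usage-sketch.ts — illustrative only, not shipped with @lota-sdk/core
import { generateText } from 'ai'
import { aiGatewayModel } from '@lota-sdk/core'

// aiGatewayModel() wraps createOpenAI() pointed at <AI_GATEWAY_URL>/v1, sends the
// x-bf-vk virtual-key header, and fills in a default 'model:<id>' x-bf-cache-key.
const { text } = await generateText({
  model: aiGatewayModel('openai/gpt-4o-mini'), // assumed model id
  prompt: 'Summarize the 0.1.19 changes in one sentence.',
})
console.log(text)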
@@ -0,0 +1,33 @@
+ const AI_GATEWAY_CACHE_KEY_HEADER = 'x-bf-cache-key'
+ const AI_GATEWAY_CACHE_TTL_HEADER = 'x-bf-cache-ttl'
+ const AI_GATEWAY_CACHE_THRESHOLD_HEADER = 'x-bf-cache-threshold'
+ const AI_GATEWAY_CACHE_TYPE_HEADER = 'x-bf-cache-type'
+
+ export const AI_GATEWAY_STRICT_SEMANTIC_CACHE_THRESHOLD = 0.975
+
+ export type AiGatewayCacheType = 'direct' | 'semantic'
+
+ export function buildAiGatewayCacheHeaders(
+   cacheKey: string,
+   ttl?: string,
+   threshold?: number,
+   cacheType?: AiGatewayCacheType,
+ ): Record<string, string> {
+   const headers: Record<string, string> = { [AI_GATEWAY_CACHE_KEY_HEADER]: cacheKey }
+   if (ttl) headers[AI_GATEWAY_CACHE_TTL_HEADER] = ttl
+   if (typeof threshold === 'number') headers[AI_GATEWAY_CACHE_THRESHOLD_HEADER] = String(threshold)
+   if (cacheType) headers[AI_GATEWAY_CACHE_TYPE_HEADER] = cacheType
+   return headers
+ }
+
+ export function buildAiGatewayDirectCacheHeaders(cacheKey: string, ttl?: string): Record<string, string> {
+   return buildAiGatewayCacheHeaders(cacheKey, ttl, undefined, 'direct')
+ }
+
+ export function buildAiGatewayStrictSemanticCacheHeaders(
+   cacheKey: string,
+   ttl?: string,
+   threshold = AI_GATEWAY_STRICT_SEMANTIC_CACHE_THRESHOLD,
+ ): Record<string, string> {
+   return buildAiGatewayCacheHeaders(cacheKey, ttl, threshold, 'semantic')
+ }
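A sketch of combining the cache-header helpers above with a per-request override (illustrative only; the cache key, TTL, and model id are invented values):

// cache-sketch.ts — illustrative only, not shipped with @lota-sdk/core
import { generateText } from 'ai'
import { aiGatewayModel, buildAiGatewayStrictSemanticCacheHeaders } from '@lota-sdk/core'

// => { 'x-bf-cache-key': 'faq:pricing', 'x-bf-cache-ttl': '3600',
//      'x-bf-cache-threshold': '0.975', 'x-bf-cache-type': 'semantic' }
const headers = buildAiGatewayStrictSemanticCacheHeaders('faq:pricing', '3600')

const { text } = await generateText({
  model: aiGatewayModel('openai/gpt-4o-mini'), // assumed model id
  headers, // mergeAiGatewayHeaders only adds missing defaults, so this key wins over 'model:<id>'
  prompt: 'What plans are available?',
})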
@@ -1,2 +1,2 @@
- export * from './bifrost'
+ export * from './ai-gateway'
  export * from './cache-headers'
@@ -1,5 +1,5 @@
  export {
-   BIFROST_REASONING_SUMMARY_LEVEL,
+   AI_GATEWAY_REASONING_SUMMARY_LEVEL,
    OPENAI_HIGH_REASONING_PROVIDER_OPTIONS,
    OPENAI_REASONING_MODEL_ID,
    OPENROUTER_DELEGATED_REASONING_MODEL_ID,
@@ -1,7 +1,7 @@
  import { embed, embedMany } from 'ai'

+ import { aiGatewayEmbeddingModel } from '../ai-gateway/ai-gateway'
  import { getEmbeddingCache } from '../ai/embedding-cache'
- import { bifrostEmbeddingModel } from '../bifrost/bifrost'
  import { getRuntimeConfig } from '../runtime/runtime-config'

  const SUPPORTED_EMBEDDING_PREFIXES = ['openai/', 'openrouter/'] as const
@@ -30,7 +30,7 @@ function resolveEmbeddingModel(modelId: string) {
      )
    }

-   return bifrostEmbeddingModel(normalized)
+   return aiGatewayEmbeddingModel(normalized)
  }

  function normalizeEmbedding(embedding: readonly number[]): number[] {
package/src/index.ts CHANGED
@@ -1,6 +1,6 @@
  export * from './create-runtime'
  export * from './ai'
- export * from './bifrost'
+ export * from './ai-gateway'
  export * from './config'
  export * from './db'
  export * from './document'
@@ -1,6 +1,10 @@
  export class ChatRunRegistry {
    private controllers = new Map<string, AbortController>()

+   has(runId: string): boolean {
+     return this.controllers.has(runId)
+   }
+
    register(runId: string, controller: AbortController): void {
      this.controllers.set(runId, controller)
    }
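A short sketch of the new has() guard on ChatRunRegistry (illustrative only; the import path and run id are assumptions — only the class body appears in this diff):

// registry-sketch.ts — illustrative only
import { ChatRunRegistry } from '@lota-sdk/core' // assumed export path

const registry = new ChatRunRegistry()
const runId = 'run-123'

// has() lets callers check for an in-flight run before registering another controller
if (!registry.has(runId)) {
  registry.register(runId, new AbortController())
}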