illuma-agents 1.0.2 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225)
  1. package/LICENSE +25 -21
  2. package/dist/cjs/agents/AgentContext.cjs +222 -0
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  4. package/dist/cjs/common/enum.cjs +5 -4
  5. package/dist/cjs/common/enum.cjs.map +1 -1
  6. package/dist/cjs/events.cjs +7 -5
  7. package/dist/cjs/events.cjs.map +1 -1
  8. package/dist/cjs/graphs/Graph.cjs +328 -207
  9. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs +507 -0
  11. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  12. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  13. package/dist/cjs/llm/google/index.cjs.map +1 -1
  14. package/dist/cjs/llm/ollama/index.cjs.map +1 -1
  15. package/dist/cjs/llm/openai/index.cjs +35 -0
  16. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  17. package/dist/cjs/llm/openai/utils/index.cjs +3 -1
  18. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  19. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  20. package/dist/cjs/llm/providers.cjs +0 -2
  21. package/dist/cjs/llm/providers.cjs.map +1 -1
  22. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  23. package/dist/cjs/main.cjs +12 -1
  24. package/dist/cjs/main.cjs.map +1 -1
  25. package/dist/cjs/messages/cache.cjs +123 -0
  26. package/dist/cjs/messages/cache.cjs.map +1 -0
  27. package/dist/cjs/messages/content.cjs +53 -0
  28. package/dist/cjs/messages/content.cjs.map +1 -0
  29. package/dist/cjs/messages/format.cjs +17 -29
  30. package/dist/cjs/messages/format.cjs.map +1 -1
  31. package/dist/cjs/run.cjs +119 -74
  32. package/dist/cjs/run.cjs.map +1 -1
  33. package/dist/cjs/stream.cjs +77 -73
  34. package/dist/cjs/stream.cjs.map +1 -1
  35. package/dist/cjs/tools/Calculator.cjs +45 -0
  36. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  37. package/dist/cjs/tools/CodeExecutor.cjs +22 -22
  38. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  39. package/dist/cjs/tools/ToolNode.cjs +5 -3
  40. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  41. package/dist/cjs/tools/handlers.cjs +20 -20
  42. package/dist/cjs/tools/handlers.cjs.map +1 -1
  43. package/dist/cjs/utils/events.cjs +31 -0
  44. package/dist/cjs/utils/events.cjs.map +1 -0
  45. package/dist/cjs/utils/handlers.cjs +70 -0
  46. package/dist/cjs/utils/handlers.cjs.map +1 -0
  47. package/dist/cjs/utils/tokens.cjs +54 -7
  48. package/dist/cjs/utils/tokens.cjs.map +1 -1
  49. package/dist/esm/agents/AgentContext.mjs +220 -0
  50. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  51. package/dist/esm/common/enum.mjs +5 -4
  52. package/dist/esm/common/enum.mjs.map +1 -1
  53. package/dist/esm/events.mjs +7 -5
  54. package/dist/esm/events.mjs.map +1 -1
  55. package/dist/esm/graphs/Graph.mjs +330 -209
  56. package/dist/esm/graphs/Graph.mjs.map +1 -1
  57. package/dist/esm/graphs/MultiAgentGraph.mjs +505 -0
  58. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  59. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  60. package/dist/esm/llm/google/index.mjs.map +1 -1
  61. package/dist/esm/llm/ollama/index.mjs.map +1 -1
  62. package/dist/esm/llm/openai/index.mjs +35 -0
  63. package/dist/esm/llm/openai/index.mjs.map +1 -1
  64. package/dist/esm/llm/openai/utils/index.mjs +3 -1
  65. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  66. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  67. package/dist/esm/llm/providers.mjs +0 -2
  68. package/dist/esm/llm/providers.mjs.map +1 -1
  69. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  70. package/dist/esm/main.mjs +7 -2
  71. package/dist/esm/main.mjs.map +1 -1
  72. package/dist/esm/messages/cache.mjs +120 -0
  73. package/dist/esm/messages/cache.mjs.map +1 -0
  74. package/dist/esm/messages/content.mjs +51 -0
  75. package/dist/esm/messages/content.mjs.map +1 -0
  76. package/dist/esm/messages/format.mjs +18 -29
  77. package/dist/esm/messages/format.mjs.map +1 -1
  78. package/dist/esm/run.mjs +119 -74
  79. package/dist/esm/run.mjs.map +1 -1
  80. package/dist/esm/stream.mjs +77 -73
  81. package/dist/esm/stream.mjs.map +1 -1
  82. package/dist/esm/tools/Calculator.mjs +24 -0
  83. package/dist/esm/tools/Calculator.mjs.map +1 -0
  84. package/dist/esm/tools/CodeExecutor.mjs +22 -22
  85. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  86. package/dist/esm/tools/ToolNode.mjs +5 -3
  87. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  88. package/dist/esm/tools/handlers.mjs +20 -20
  89. package/dist/esm/tools/handlers.mjs.map +1 -1
  90. package/dist/esm/utils/events.mjs +29 -0
  91. package/dist/esm/utils/events.mjs.map +1 -0
  92. package/dist/esm/utils/handlers.mjs +68 -0
  93. package/dist/esm/utils/handlers.mjs.map +1 -0
  94. package/dist/esm/utils/tokens.mjs +54 -8
  95. package/dist/esm/utils/tokens.mjs.map +1 -1
  96. package/dist/types/agents/AgentContext.d.ts +94 -0
  97. package/dist/types/common/enum.d.ts +7 -5
  98. package/dist/types/events.d.ts +3 -3
  99. package/dist/types/graphs/Graph.d.ts +60 -66
  100. package/dist/types/graphs/MultiAgentGraph.d.ts +47 -0
  101. package/dist/types/graphs/index.d.ts +1 -0
  102. package/dist/types/index.d.ts +1 -0
  103. package/dist/types/llm/openai/index.d.ts +10 -0
  104. package/dist/types/messages/cache.d.ts +20 -0
  105. package/dist/types/messages/content.d.ts +7 -0
  106. package/dist/types/messages/format.d.ts +1 -7
  107. package/dist/types/messages/index.d.ts +2 -0
  108. package/dist/types/messages/reducer.d.ts +9 -0
  109. package/dist/types/run.d.ts +16 -10
  110. package/dist/types/stream.d.ts +4 -3
  111. package/dist/types/tools/Calculator.d.ts +8 -0
  112. package/dist/types/tools/ToolNode.d.ts +1 -1
  113. package/dist/types/tools/handlers.d.ts +9 -7
  114. package/dist/types/tools/search/tool.d.ts +4 -4
  115. package/dist/types/types/graph.d.ts +124 -11
  116. package/dist/types/types/llm.d.ts +13 -9
  117. package/dist/types/types/messages.d.ts +4 -0
  118. package/dist/types/types/run.d.ts +46 -8
  119. package/dist/types/types/stream.d.ts +3 -2
  120. package/dist/types/utils/events.d.ts +6 -0
  121. package/dist/types/utils/handlers.d.ts +34 -0
  122. package/dist/types/utils/index.d.ts +1 -0
  123. package/dist/types/utils/tokens.d.ts +24 -0
  124. package/package.json +162 -145
  125. package/src/agents/AgentContext.ts +323 -0
  126. package/src/common/enum.ts +177 -176
  127. package/src/events.ts +197 -191
  128. package/src/graphs/Graph.ts +1058 -846
  129. package/src/graphs/MultiAgentGraph.ts +598 -0
  130. package/src/graphs/index.ts +2 -1
  131. package/src/index.ts +25 -24
  132. package/src/llm/anthropic/index.ts +413 -413
  133. package/src/llm/google/index.ts +222 -222
  134. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -88
  135. package/src/llm/ollama/index.ts +92 -92
  136. package/src/llm/openai/index.ts +894 -853
  137. package/src/llm/openai/utils/index.ts +920 -918
  138. package/src/llm/openrouter/index.ts +60 -60
  139. package/src/llm/providers.ts +55 -57
  140. package/src/llm/vertexai/index.ts +360 -360
  141. package/src/messages/cache.test.ts +461 -0
  142. package/src/messages/cache.ts +151 -0
  143. package/src/messages/content.test.ts +362 -0
  144. package/src/messages/content.ts +63 -0
  145. package/src/messages/format.ts +611 -625
  146. package/src/messages/formatAgentMessages.test.ts +1144 -917
  147. package/src/messages/index.ts +6 -4
  148. package/src/messages/reducer.ts +80 -0
  149. package/src/run.ts +447 -381
  150. package/src/scripts/abort.ts +157 -138
  151. package/src/scripts/ant_web_search.ts +158 -158
  152. package/src/scripts/cli.ts +172 -167
  153. package/src/scripts/cli2.ts +133 -125
  154. package/src/scripts/cli3.ts +184 -178
  155. package/src/scripts/cli4.ts +191 -184
  156. package/src/scripts/cli5.ts +191 -184
  157. package/src/scripts/code_exec.ts +213 -214
  158. package/src/scripts/code_exec_simple.ts +147 -129
  159. package/src/scripts/content.ts +138 -120
  160. package/src/scripts/handoff-test.ts +135 -0
  161. package/src/scripts/multi-agent-chain.ts +278 -0
  162. package/src/scripts/multi-agent-conditional.ts +220 -0
  163. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  164. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  165. package/src/scripts/multi-agent-parallel.ts +343 -0
  166. package/src/scripts/multi-agent-sequence.ts +212 -0
  167. package/src/scripts/multi-agent-supervisor.ts +364 -0
  168. package/src/scripts/multi-agent-test.ts +186 -0
  169. package/src/scripts/search.ts +146 -150
  170. package/src/scripts/simple.ts +225 -225
  171. package/src/scripts/stream.ts +140 -122
  172. package/src/scripts/test-custom-prompt-key.ts +145 -0
  173. package/src/scripts/test-handoff-input.ts +170 -0
  174. package/src/scripts/test-multi-agent-list-handoff.ts +261 -0
  175. package/src/scripts/test-tools-before-handoff.ts +222 -0
  176. package/src/scripts/tools.ts +153 -155
  177. package/src/specs/agent-handoffs.test.ts +889 -0
  178. package/src/specs/anthropic.simple.test.ts +320 -317
  179. package/src/specs/azure.simple.test.ts +325 -316
  180. package/src/specs/openai.simple.test.ts +311 -316
  181. package/src/specs/openrouter.simple.test.ts +107 -0
  182. package/src/specs/prune.test.ts +758 -763
  183. package/src/specs/reasoning.test.ts +201 -165
  184. package/src/specs/thinking-prune.test.ts +769 -703
  185. package/src/specs/token-memoization.test.ts +39 -0
  186. package/src/stream.ts +664 -651
  187. package/src/tools/Calculator.test.ts +278 -0
  188. package/src/tools/Calculator.ts +25 -0
  189. package/src/tools/CodeExecutor.ts +220 -220
  190. package/src/tools/ToolNode.ts +170 -170
  191. package/src/tools/handlers.ts +341 -336
  192. package/src/types/graph.ts +372 -185
  193. package/src/types/llm.ts +141 -140
  194. package/src/types/messages.ts +4 -0
  195. package/src/types/run.ts +128 -89
  196. package/src/types/stream.ts +401 -400
  197. package/src/utils/events.ts +32 -0
  198. package/src/utils/handlers.ts +107 -0
  199. package/src/utils/index.ts +6 -5
  200. package/src/utils/llmConfig.ts +183 -183
  201. package/src/utils/tokens.ts +129 -70
  202. package/dist/types/scripts/abort.d.ts +0 -1
  203. package/dist/types/scripts/ant_web_search.d.ts +0 -1
  204. package/dist/types/scripts/args.d.ts +0 -7
  205. package/dist/types/scripts/caching.d.ts +0 -1
  206. package/dist/types/scripts/cli.d.ts +0 -1
  207. package/dist/types/scripts/cli2.d.ts +0 -1
  208. package/dist/types/scripts/cli3.d.ts +0 -1
  209. package/dist/types/scripts/cli4.d.ts +0 -1
  210. package/dist/types/scripts/cli5.d.ts +0 -1
  211. package/dist/types/scripts/code_exec.d.ts +0 -1
  212. package/dist/types/scripts/code_exec_files.d.ts +0 -1
  213. package/dist/types/scripts/code_exec_simple.d.ts +0 -1
  214. package/dist/types/scripts/content.d.ts +0 -1
  215. package/dist/types/scripts/empty_input.d.ts +0 -1
  216. package/dist/types/scripts/image.d.ts +0 -1
  217. package/dist/types/scripts/memory.d.ts +0 -1
  218. package/dist/types/scripts/search.d.ts +0 -1
  219. package/dist/types/scripts/simple.d.ts +0 -1
  220. package/dist/types/scripts/stream.d.ts +0 -1
  221. package/dist/types/scripts/thinking.d.ts +0 -1
  222. package/dist/types/scripts/tools.d.ts +0 -1
  223. package/dist/types/specs/spec.utils.d.ts +0 -1
  224. package/dist/types/tools/example.d.ts +0 -78
  225. package/src/tools/example.ts +0 -129
@@ -0,0 +1,32 @@
1
+ /* eslint-disable no-console */
2
+ // src/utils/events.ts
3
+ import { dispatchCustomEvent } from '@langchain/core/callbacks/dispatch';
4
+ import type { RunnableConfig } from '@langchain/core/runnables';
5
+
6
+ /**
7
+ * Safely dispatches a custom event and properly awaits it to avoid
8
+ * race conditions where events are dispatched after run cleanup.
9
+ */
10
+ export async function safeDispatchCustomEvent(
11
+ event: string,
12
+ payload: unknown,
13
+ config?: RunnableConfig
14
+ ): Promise<void> {
15
+ try {
16
+ await dispatchCustomEvent(event, payload, config);
17
+ } catch (e) {
18
+ // Check if this is the known EventStreamCallbackHandler error
19
+ if (
20
+ e instanceof Error &&
21
+ e.message.includes('handleCustomEvent: Run ID') &&
22
+ e.message.includes('not found in run map')
23
+ ) {
24
+ // Suppress this specific error - it's expected during parallel execution
25
+ // when EventStreamCallbackHandler loses track of run IDs
26
+ // console.debug('Suppressed error dispatching custom event:', e);
27
+ return;
28
+ }
29
+ // Log other errors
30
+ console.error('Error dispatching custom event:', e);
31
+ }
32
+ }
@@ -0,0 +1,107 @@
1
+ /**
2
+ * Multi-Agent Handler Utilities
3
+ *
4
+ * Provides a simple helper to create handlers with content aggregation for multi-agent scripts.
5
+ *
6
+ * Usage:
7
+ * ```typescript
8
+ * const { contentParts, aggregateContent, handlers } = createHandlers();
9
+ *
10
+ * // With callbacks
11
+ * const { contentParts, aggregateContent, handlers } = createHandlers({
12
+ * onRunStep: (event, data) => console.log('Step:', data),
13
+ * onRunStepCompleted: (event, data) => console.log('Completed:', data)
14
+ * });
15
+ * ```
16
+ */
17
+
18
+ import { GraphEvents } from '@/common';
19
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
20
+ import { ToolEndHandler, ModelEndHandler } from '@/events';
21
+ import type * as t from '@/types';
22
+
23
+ interface HandlerCallbacks {
24
+ onRunStep?: (event: GraphEvents.ON_RUN_STEP, data: t.StreamEventData) => void;
25
+ onRunStepCompleted?: (
26
+ event: GraphEvents.ON_RUN_STEP_COMPLETED,
27
+ data: t.StreamEventData
28
+ ) => void;
29
+ onRunStepDelta?: (
30
+ event: GraphEvents.ON_RUN_STEP_DELTA,
31
+ data: t.StreamEventData
32
+ ) => void;
33
+ onMessageDelta?: (
34
+ event: GraphEvents.ON_MESSAGE_DELTA,
35
+ data: t.StreamEventData
36
+ ) => void;
37
+ }
38
+
39
+ /**
40
+ * Creates handlers with content aggregation for multi-agent scripts
41
+ */
42
+ export function createHandlers(callbacks?: HandlerCallbacks): {
43
+ contentParts: Array<t.MessageContentComplex | undefined>;
44
+ aggregateContent: ReturnType<
45
+ typeof createContentAggregator
46
+ >['aggregateContent'];
47
+ handlers: Record<string, t.EventHandler>;
48
+ } {
49
+ // Set up content aggregator
50
+ const { contentParts, aggregateContent } = createContentAggregator();
51
+
52
+ // Create the handlers object
53
+ const handlers = {
54
+ [GraphEvents.TOOL_END]: new ToolEndHandler(),
55
+ [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
56
+ [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
57
+
58
+ [GraphEvents.ON_RUN_STEP]: {
59
+ handle: (
60
+ event: GraphEvents.ON_RUN_STEP,
61
+ data: t.StreamEventData
62
+ ): void => {
63
+ aggregateContent({ event, data: data as t.RunStep });
64
+ callbacks?.onRunStep?.(event, data);
65
+ },
66
+ },
67
+
68
+ [GraphEvents.ON_RUN_STEP_COMPLETED]: {
69
+ handle: (
70
+ event: GraphEvents.ON_RUN_STEP_COMPLETED,
71
+ data: t.StreamEventData
72
+ ): void => {
73
+ aggregateContent({
74
+ event,
75
+ data: data as unknown as { result: t.ToolEndEvent },
76
+ });
77
+ callbacks?.onRunStepCompleted?.(event, data);
78
+ },
79
+ },
80
+
81
+ [GraphEvents.ON_RUN_STEP_DELTA]: {
82
+ handle: (
83
+ event: GraphEvents.ON_RUN_STEP_DELTA,
84
+ data: t.StreamEventData
85
+ ): void => {
86
+ aggregateContent({ event, data: data as t.RunStepDeltaEvent });
87
+ callbacks?.onRunStepDelta?.(event, data);
88
+ },
89
+ },
90
+
91
+ [GraphEvents.ON_MESSAGE_DELTA]: {
92
+ handle: (
93
+ event: GraphEvents.ON_MESSAGE_DELTA,
94
+ data: t.StreamEventData
95
+ ): void => {
96
+ aggregateContent({ event, data: data as t.MessageDeltaEvent });
97
+ callbacks?.onMessageDelta?.(event, data);
98
+ },
99
+ },
100
+ };
101
+
102
+ return {
103
+ contentParts,
104
+ aggregateContent,
105
+ handlers,
106
+ };
107
+ }
@@ -1,5 +1,6 @@
1
- export * from './graph';
2
- export * from './llm';
3
- export * from './misc';
4
- export * from './run';
5
- export * from './tokens';
1
+ export * from './graph';
2
+ export * from './llm';
3
+ export * from './misc';
4
+ export * from './handlers';
5
+ export * from './run';
6
+ export * from './tokens';
@@ -1,183 +1,183 @@
1
- // src/utils/llmConfig.ts
2
- import { Providers } from '@/common';
3
- import type * as or from '@/llm/openrouter';
4
- import type * as t from '@/types';
5
-
6
- export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
7
- [Providers.OPENAI]: {
8
- provider: Providers.OPENAI,
9
- model: 'gpt-4.1',
10
- temperature: 0.7,
11
- streaming: true,
12
- streamUsage: true,
13
- // disableStreaming: true,
14
- },
15
- anthropicLITELLM: {
16
- provider: Providers.OPENAI,
17
- streaming: true,
18
- streamUsage: false,
19
- apiKey: 'sk-1234',
20
- model: 'claude-sonnet-4',
21
- maxTokens: 8192,
22
- modelKwargs: {
23
- metadata: {
24
- user_id: 'some_user_id',
25
- },
26
- thinking: {
27
- type: 'enabled',
28
- budget_tokens: 2000,
29
- },
30
- },
31
- configuration: {
32
- baseURL: 'http://host.docker.internal:4000/v1',
33
- defaultHeaders: {
34
- 'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
35
- },
36
- },
37
- // disableStreaming: true,
38
- },
39
- [Providers.XAI]: {
40
- provider: Providers.XAI,
41
- model: 'grok-2-latest',
42
- streaming: true,
43
- streamUsage: true,
44
- },
45
- alibaba: {
46
- provider: Providers.OPENAI,
47
- streaming: true,
48
- streamUsage: true,
49
- model: 'qwen-max',
50
- openAIApiKey: process.env.ALIBABA_API_KEY,
51
- configuration: {
52
- baseURL: 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
53
- },
54
- },
55
- [Providers.OPENROUTER]: {
56
- provider: Providers.OPENROUTER,
57
- streaming: true,
58
- streamUsage: true,
59
- model: 'deepseek/deepseek-r1',
60
- openAIApiKey: process.env.OPENROUTER_API_KEY,
61
- configuration: {
62
- baseURL: process.env.OPENROUTER_BASE_URL,
63
- defaultHeaders: {
64
- 'HTTP-Referer': 'https://gaavi.ai',
65
- 'X-Title': 'Illuma',
66
- },
67
- },
68
- include_reasoning: true,
69
- } as or.ChatOpenRouterCallOptions & t.LLMConfig,
70
- [Providers.AZURE]: {
71
- provider: Providers.AZURE,
72
- temperature: 0.7,
73
- streaming: true,
74
- streamUsage: true,
75
- azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
76
- azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE,
77
- azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT,
78
- azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION,
79
- model: process.env.AZURE_MODEL_NAME ?? 'gpt-4o',
80
- },
81
- [Providers.OLLAMA]: {
82
- provider: Providers.OLLAMA,
83
- model: 'gpt-oss:20b',
84
- streaming: true,
85
- streamUsage: true,
86
- baseUrl: 'http://localhost:11434',
87
- },
88
- lmstudio: {
89
- provider: Providers.OPENAI,
90
- model: 'gpt-oss-20b',
91
- streaming: true,
92
- streamUsage: true,
93
- configuration: {
94
- baseURL: 'http://192.168.254.183:1233/v1',
95
- },
96
- },
97
- zhipu: {
98
- provider: Providers.OPENAI,
99
- streaming: true,
100
- streamUsage: false,
101
- model: 'glm-4.5-air',
102
- apiKey: process.env.ZHIPU_API_KEY,
103
- configuration: {
104
- baseURL: 'https://open.bigmodel.cn/api/paas/v4',
105
- },
106
- },
107
- [Providers.DEEPSEEK]: {
108
- provider: Providers.DEEPSEEK,
109
- model: 'deepseek-reasoner',
110
- streaming: true,
111
- streamUsage: true,
112
- },
113
- [Providers.ANTHROPIC]: {
114
- provider: Providers.ANTHROPIC,
115
- model: 'claude-sonnet-4-5',
116
- streaming: true,
117
- streamUsage: true,
118
- },
119
- // [Providers.MISTRALAI]: {
120
- // provider: Providers.MISTRALAI,
121
- // model: 'mistral-large-latest',
122
- // streaming: true,
123
- // streamUsage: true,
124
- // },
125
- [Providers.MISTRAL]: {
126
- provider: Providers.OPENAI,
127
- streaming: true,
128
- streamUsage: false,
129
- // model: 'codestral-latest',
130
- model: 'mistral-large-latest',
131
- openAIApiKey: process.env.MISTRAL_API_KEY,
132
- configuration: {
133
- baseURL: 'https://api.mistral.ai/v1',
134
- defaultHeaders: {},
135
- },
136
- },
137
- [Providers.VERTEXAI]: {
138
- provider: Providers.VERTEXAI,
139
- model: 'gemini-2.5-flash',
140
- streaming: true,
141
- streamUsage: true,
142
- keyFile: process.env.VERTEXAI_KEY_FILE,
143
- } as t.VertexAIClientOptions & t.LLMConfig,
144
- [Providers.GOOGLE]: {
145
- provider: Providers.GOOGLE,
146
- model: 'gemini-2.5-flash',
147
- streaming: true,
148
- streamUsage: true,
149
- },
150
- [Providers.BEDROCK]: {
151
- provider: Providers.BEDROCK,
152
- // model: 'anthropic.claude-3-sonnet-20240229-v1:0',
153
- // model: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
154
- // model: 'us.amazon.nova-pro-v1:0',
155
- model: 'us.anthropic.claude-sonnet-4-20250514-v1:0',
156
- // additionalModelRequestFields: { thinking: { type: 'enabled', budget_tokens: 2000 } },
157
- region: process.env.BEDROCK_AWS_REGION,
158
- credentials: {
159
- accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
160
- secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
161
- },
162
- streaming: true,
163
- streamUsage: true,
164
- },
165
- perplexity: {
166
- provider: Providers.OPENAI,
167
- model: 'llama-3.1-sonar-small-128k-online',
168
- streaming: true,
169
- streamUsage: true,
170
- apiKey: process.env.PERPLEXITY_API_KEY,
171
- configuration: {
172
- baseURL: 'https://api.perplexity.ai/',
173
- },
174
- },
175
- };
176
-
177
- export function getLLMConfig(provider: string): t.LLMConfig {
178
- const config = llmConfigs[provider];
179
- if (config === undefined) {
180
- throw new Error(`Unsupported provider: ${provider}`);
181
- }
182
- return config;
183
- }
1
+ // src/utils/llmConfig.ts
2
+ import { Providers } from '@/common';
3
+ import type * as or from '@/llm/openrouter';
4
+ import type * as t from '@/types';
5
+
6
+ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
7
+ [Providers.OPENAI]: {
8
+ provider: Providers.OPENAI,
9
+ model: 'gpt-4.1',
10
+ // temperature: 0.7,
11
+ streaming: true,
12
+ streamUsage: true,
13
+ // disableStreaming: true,
14
+ },
15
+ anthropicLITELLM: {
16
+ provider: Providers.OPENAI,
17
+ streaming: true,
18
+ streamUsage: false,
19
+ apiKey: 'sk-1234',
20
+ model: 'claude-sonnet-4',
21
+ maxTokens: 8192,
22
+ modelKwargs: {
23
+ metadata: {
24
+ user_id: 'some_user_id',
25
+ },
26
+ thinking: {
27
+ type: 'enabled',
28
+ budget_tokens: 2000,
29
+ },
30
+ },
31
+ configuration: {
32
+ baseURL: 'http://host.docker.internal:4000/v1',
33
+ defaultHeaders: {
34
+ 'anthropic-beta': 'prompt-caching-2024-07-31,context-1m-2025-08-07',
35
+ },
36
+ },
37
+ // disableStreaming: true,
38
+ },
39
+ [Providers.XAI]: {
40
+ provider: Providers.XAI,
41
+ model: 'grok-2-latest',
42
+ streaming: true,
43
+ streamUsage: true,
44
+ },
45
+ alibaba: {
46
+ provider: Providers.OPENAI,
47
+ streaming: true,
48
+ streamUsage: true,
49
+ model: 'qwen-max',
50
+ openAIApiKey: process.env.ALIBABA_API_KEY,
51
+ configuration: {
52
+ baseURL: 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
53
+ },
54
+ },
55
+ [Providers.OPENROUTER]: {
56
+ provider: Providers.OPENROUTER,
57
+ streaming: true,
58
+ streamUsage: true,
59
+ model: 'openai/gpt-4.1',
60
+ openAIApiKey: process.env.OPENROUTER_API_KEY,
61
+ configuration: {
62
+ baseURL: process.env.OPENROUTER_BASE_URL,
63
+ defaultHeaders: {
64
+ 'HTTP-Referer': 'https://illuma.ai',
65
+ 'X-Title': 'Illuma',
66
+ },
67
+ },
68
+ include_reasoning: true,
69
+ } as or.ChatOpenRouterCallOptions & t.LLMConfig,
70
+ [Providers.AZURE]: {
71
+ provider: Providers.AZURE,
72
+ temperature: 0.7,
73
+ streaming: true,
74
+ streamUsage: true,
75
+ azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
76
+ azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE,
77
+ azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT,
78
+ azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION,
79
+ model: process.env.AZURE_MODEL_NAME ?? 'gpt-4o',
80
+ },
81
+ [Providers.OLLAMA]: {
82
+ provider: Providers.OLLAMA,
83
+ model: 'gpt-oss:20b',
84
+ streaming: true,
85
+ streamUsage: true,
86
+ baseUrl: 'http://localhost:11434',
87
+ },
88
+ lmstudio: {
89
+ provider: Providers.OPENAI,
90
+ model: 'gpt-oss-20b',
91
+ streaming: true,
92
+ streamUsage: true,
93
+ configuration: {
94
+ baseURL: 'http://192.168.254.183:1233/v1',
95
+ },
96
+ },
97
+ zhipu: {
98
+ provider: Providers.OPENAI,
99
+ streaming: true,
100
+ streamUsage: false,
101
+ model: 'glm-4.5-air',
102
+ apiKey: process.env.ZHIPU_API_KEY,
103
+ configuration: {
104
+ baseURL: 'https://open.bigmodel.cn/api/paas/v4',
105
+ },
106
+ },
107
+ [Providers.DEEPSEEK]: {
108
+ provider: Providers.DEEPSEEK,
109
+ model: 'deepseek-reasoner',
110
+ streaming: true,
111
+ streamUsage: true,
112
+ },
113
+ [Providers.ANTHROPIC]: {
114
+ provider: Providers.ANTHROPIC,
115
+ model: 'claude-sonnet-4-5',
116
+ streaming: true,
117
+ streamUsage: true,
118
+ },
119
+ // [Providers.MISTRALAI]: {
120
+ // provider: Providers.MISTRALAI,
121
+ // model: 'mistral-large-latest',
122
+ // streaming: true,
123
+ // streamUsage: true,
124
+ // },
125
+ [Providers.MISTRAL]: {
126
+ provider: Providers.OPENAI,
127
+ streaming: true,
128
+ streamUsage: false,
129
+ // model: 'codestral-latest',
130
+ model: 'mistral-large-latest',
131
+ openAIApiKey: process.env.MISTRAL_API_KEY,
132
+ configuration: {
133
+ baseURL: 'https://api.mistral.ai/v1',
134
+ defaultHeaders: {},
135
+ },
136
+ },
137
+ [Providers.VERTEXAI]: {
138
+ provider: Providers.VERTEXAI,
139
+ model: 'gemini-2.5-flash',
140
+ streaming: true,
141
+ streamUsage: true,
142
+ keyFile: process.env.VERTEXAI_KEY_FILE,
143
+ } as t.VertexAIClientOptions & t.LLMConfig,
144
+ [Providers.GOOGLE]: {
145
+ provider: Providers.GOOGLE,
146
+ model: 'gemini-2.5-flash',
147
+ streaming: true,
148
+ streamUsage: true,
149
+ },
150
+ [Providers.BEDROCK]: {
151
+ provider: Providers.BEDROCK,
152
+ // model: 'anthropic.claude-3-sonnet-20240229-v1:0',
153
+ // model: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
154
+ // model: 'us.amazon.nova-pro-v1:0',
155
+ model: 'us.anthropic.claude-sonnet-4-20250514-v1:0',
156
+ // additionalModelRequestFields: { thinking: { type: 'enabled', budget_tokens: 2000 } },
157
+ region: process.env.BEDROCK_AWS_REGION,
158
+ credentials: {
159
+ accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
160
+ secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
161
+ },
162
+ streaming: true,
163
+ streamUsage: true,
164
+ },
165
+ perplexity: {
166
+ provider: Providers.OPENAI,
167
+ model: 'llama-3.1-sonar-small-128k-online',
168
+ streaming: true,
169
+ streamUsage: true,
170
+ apiKey: process.env.PERPLEXITY_API_KEY,
171
+ configuration: {
172
+ baseURL: 'https://api.perplexity.ai/',
173
+ },
174
+ },
175
+ };
176
+
177
+ export function getLLMConfig(provider: string): t.LLMConfig {
178
+ const config = llmConfigs[provider];
179
+ if (config === undefined) {
180
+ throw new Error(`Unsupported provider: ${provider}`);
181
+ }
182
+ return config;
183
+ }