@agenticmail/enterprise 0.5.20 → 0.5.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,247 @@
1
// src/runtime/providers.ts

/**
 * Registry of built-in LLM provider definitions, keyed by provider id.
 *
 * Each entry records: id/name, API base URL, the wire protocol to speak
 * (`apiType`), the environment variable holding its credential (`envKey`),
 * whether a key is mandatory, tool/streaming support flags, whether the
 * endpoint is local, and (for hosted providers) a default model list.
 *
 * NOTE: key order matters — listAllProviders() exposes Object.values()
 * of this object, so insertion order is the public listing order.
 */
const PROVIDER_REGISTRY = {
  // --- Hosted providers with native protocols ---
  anthropic: {
    id: "anthropic",
    name: "Anthropic",
    baseUrl: "https://api.anthropic.com",
    apiType: "anthropic",
    envKey: "ANTHROPIC_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["claude-opus-4-6", "claude-sonnet-4-6", "claude-sonnet-4-5-20250929", "claude-haiku-4-5-20251001"]
  },
  openai: {
    id: "openai",
    name: "OpenAI",
    baseUrl: "https://api.openai.com/v1",
    apiType: "openai-compatible",
    envKey: "OPENAI_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o3", "o4-mini"]
  },
  google: {
    id: "google",
    name: "Google",
    baseUrl: "https://generativelanguage.googleapis.com/v1beta",
    apiType: "google",
    envKey: "GOOGLE_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-3-pro"]
  },
  // --- Hosted OpenAI-compatible providers ---
  deepseek: {
    id: "deepseek",
    name: "DeepSeek",
    baseUrl: "https://api.deepseek.com",
    apiType: "openai-compatible",
    envKey: "DEEPSEEK_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["deepseek-chat", "deepseek-reasoner", "deepseek-chat-v3"]
  },
  xai: {
    id: "xai",
    name: "xAI",
    baseUrl: "https://api.x.ai/v1",
    apiType: "openai-compatible",
    envKey: "XAI_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["grok-4", "grok-4-fast", "grok-3", "grok-3-mini"]
  },
  mistral: {
    id: "mistral",
    name: "Mistral",
    baseUrl: "https://api.mistral.ai/v1",
    apiType: "openai-compatible",
    envKey: "MISTRAL_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["mistral-large-latest", "mistral-small-latest", "codestral-latest"]
  },
  groq: {
    id: "groq",
    name: "Groq",
    baseUrl: "https://api.groq.com/openai/v1",
    apiType: "openai-compatible",
    envKey: "GROQ_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["llama-3.3-70b-versatile", "llama-3.1-8b-instant", "gemma2-9b-it"]
  },
  together: {
    id: "together",
    name: "Together",
    baseUrl: "https://api.together.xyz/v1",
    apiType: "openai-compatible",
    envKey: "TOGETHER_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["meta-llama/Llama-3.3-70B-Instruct-Turbo", "Qwen/Qwen2.5-72B-Instruct-Turbo"]
  },
  fireworks: {
    id: "fireworks",
    name: "Fireworks",
    baseUrl: "https://api.fireworks.ai/inference/v1",
    apiType: "openai-compatible",
    envKey: "FIREWORKS_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["accounts/fireworks/models/llama-v3p3-70b-instruct"]
  },
  moonshot: {
    id: "moonshot",
    name: "Moonshot",
    baseUrl: "https://api.moonshot.cn/v1",
    apiType: "openai-compatible",
    envKey: "MOONSHOT_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["kimi-k2-0711"]
  },
  cerebras: {
    id: "cerebras",
    name: "Cerebras",
    baseUrl: "https://api.cerebras.ai/v1",
    apiType: "openai-compatible",
    envKey: "CEREBRAS_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["llama-3.3-70b", "llama-3.1-8b"]
  },
  openrouter: {
    id: "openrouter",
    name: "OpenRouter",
    baseUrl: "https://openrouter.ai/api/v1",
    apiType: "openai-compatible",
    envKey: "OPENROUTER_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["anthropic/claude-sonnet-4", "openai/gpt-4o", "google/gemini-2.5-pro"]
  },
  nvidia: {
    id: "nvidia",
    name: "NVIDIA",
    baseUrl: "https://integrate.api.nvidia.com/v1",
    apiType: "openai-compatible",
    envKey: "NVIDIA_API_KEY",
    requiresApiKey: true,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: false,
    defaultModels: ["nvidia/llama-3.1-nemotron-70b-instruct"]
  },
  // --- Local endpoints (no key required; no default model list) ---
  ollama: {
    id: "ollama",
    name: "Ollama",
    baseUrl: "http://localhost:11434",
    apiType: "ollama",
    envKey: "OLLAMA_HOST",
    requiresApiKey: false,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: true
  },
  vllm: {
    id: "vllm",
    name: "vLLM",
    baseUrl: "http://localhost:8000/v1",
    apiType: "openai-compatible",
    envKey: "VLLM_API_KEY",
    requiresApiKey: false,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: true
  },
  lmstudio: {
    id: "lmstudio",
    name: "LM Studio",
    baseUrl: "http://localhost:1234/v1",
    apiType: "openai-compatible",
    envKey: "",
    requiresApiKey: false,
    supportsTools: false,
    supportsStreaming: true,
    isLocal: true
  },
  litellm: {
    id: "litellm",
    name: "LiteLLM",
    baseUrl: "http://localhost:4000/v1",
    apiType: "openai-compatible",
    envKey: "LITELLM_API_KEY",
    requiresApiKey: false,
    supportsTools: true,
    supportsStreaming: true,
    isLocal: true
  }
};
204
/**
 * Look up a provider definition by id.
 *
 * Checks the built-in PROVIDER_REGISTRY first, then the optional
 * customProviders array (matched on the definition's `id` field).
 *
 * @param {string} providerName - Provider id to resolve.
 * @param {Array|undefined} customProviders - Optional custom definitions.
 * @returns {object|null} The matching definition, or null when unknown.
 */
function resolveProvider(providerName, customProviders) {
  // Own-property check: the original truthiness test on
  // PROVIDER_REGISTRY[providerName] also matched inherited names such as
  // "toString" or "constructor", returning Object.prototype members as if
  // they were provider definitions.
  if (Object.prototype.hasOwnProperty.call(PROVIDER_REGISTRY, providerName)) {
    return PROVIDER_REGISTRY[providerName];
  }
  if (customProviders) {
    var match = customProviders.find(function(p) {
      return p.id === providerName;
    });
    if (match) return match;
  }
  return null;
}
216
/**
 * Read the wire-protocol identifier (e.g. "anthropic",
 * "openai-compatible") from a provider definition.
 *
 * @param {object} provider - A provider definition.
 * @returns {string} The definition's apiType field.
 */
function getApiType({ apiType }) {
  return apiType;
}
219
/**
 * Read the API base URL from a provider definition.
 *
 * @param {object} provider - A provider definition.
 * @returns {string} The definition's baseUrl field.
 */
function getBaseUrl({ baseUrl }) {
  return baseUrl;
}
222
/**
 * Resolve the API key to use for a provider.
 *
 * Precedence: a truthy explicit entry in the `apiKeys` map, then the
 * value of the provider's environment variable, then "" for providers
 * that do not require a key. Returns undefined when the provider is
 * unknown or a required key cannot be found.
 *
 * Handles both built-in definitions (envKey / requiresApiKey) and
 * custom definitions using the legacy apiKeyEnvVar field.
 *
 * @param {string} providerName - Provider id.
 * @param {object|undefined} apiKeys - Optional map of provider id -> key.
 * @param {Array|undefined} customProviders - Optional custom definitions.
 * @returns {string|undefined} The key, "" (no key needed), or undefined.
 */
function resolveApiKeyForProvider(providerName, apiKeys, customProviders) {
  // Own-property check: a bare truthiness test on apiKeys[providerName]
  // would also match inherited names ("constructor", "toString") and hand
  // back an Object.prototype member as a credential.
  if (apiKeys && Object.prototype.hasOwnProperty.call(apiKeys, providerName) && apiKeys[providerName]) {
    return apiKeys[providerName];
  }
  var def = resolveProvider(providerName, customProviders);
  if (!def) return void 0;
  // Built-in entries carry envKey; legacy custom entries carry apiKeyEnvVar.
  var envKey = "envKey" in def ? def.envKey : def.apiKeyEnvVar;
  if (envKey && process.env[envKey]) return process.env[envKey];
  var requiresKey = "requiresApiKey" in def ? def.requiresApiKey : !!def.apiKeyEnvVar;
  if (!requiresKey) return "";
  return void 0;
}
232
/**
 * List every known provider definition: the built-ins from
 * PROVIDER_REGISTRY (in registry insertion order) followed by any
 * supplied custom definitions.
 *
 * @param {Array|undefined} customProviders - Optional custom definitions.
 * @returns {Array} A new array of provider definitions.
 */
function listAllProviders(customProviders) {
  var builtIns = Object.values(PROVIDER_REGISTRY);
  if (!customProviders || customProviders.length === 0) {
    return builtIns;
  }
  return builtIns.concat(customProviders);
}
239
+
240
+ export {
241
+ PROVIDER_REGISTRY,
242
+ resolveProvider,
243
+ getApiType,
244
+ getBaseUrl,
245
+ resolveApiKeyForProvider,
246
+ listAllProviders
247
+ };