@geenius/ai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165) hide show
  1. package/.changeset/config.json +11 -0
  2. package/.env.example +2 -0
  3. package/.github/CODEOWNERS +1 -0
  4. package/.github/ISSUE_TEMPLATE/bug_report.md +16 -0
  5. package/.github/ISSUE_TEMPLATE/feature_request.md +11 -0
  6. package/.github/PULL_REQUEST_TEMPLATE.md +10 -0
  7. package/.github/dependabot.yml +11 -0
  8. package/.github/workflows/ci.yml +23 -0
  9. package/.github/workflows/release.yml +29 -0
  10. package/.node-version +1 -0
  11. package/.nvmrc +1 -0
  12. package/.prettierrc +7 -0
  13. package/.project/ACCOUNT.yaml +4 -0
  14. package/.project/IDEAS.yaml +7 -0
  15. package/.project/PROJECT.yaml +11 -0
  16. package/.project/ROADMAP.yaml +15 -0
  17. package/CHANGELOG.md +15 -0
  18. package/CODE_OF_CONDUCT.md +26 -0
  19. package/CONTRIBUTING.md +61 -0
  20. package/LICENSE +21 -0
  21. package/README.md +1 -0
  22. package/SECURITY.md +18 -0
  23. package/SUPPORT.md +14 -0
  24. package/package.json +75 -0
  25. package/packages/convex/package.json +42 -0
  26. package/packages/convex/src/index.ts +8 -0
  27. package/packages/convex/src/mutations/messages.ts +29 -0
  28. package/packages/convex/src/queries/messages.ts +24 -0
  29. package/packages/convex/src/schema.ts +20 -0
  30. package/packages/convex/tsconfig.json +11 -0
  31. package/packages/convex/tsup.config.ts +17 -0
  32. package/packages/react/README.md +1 -0
  33. package/packages/react/package.json +60 -0
  34. package/packages/react/src/components/AILogTable.tsx +90 -0
  35. package/packages/react/src/components/ChatWindow.tsx +118 -0
  36. package/packages/react/src/components/GenerationCard.tsx +73 -0
  37. package/packages/react/src/components/ImageGenerator.tsx +103 -0
  38. package/packages/react/src/components/ModelSelector.tsx +44 -0
  39. package/packages/react/src/components/ModelTestRunner.tsx +148 -0
  40. package/packages/react/src/components/VoiceSelector.tsx +51 -0
  41. package/packages/react/src/components/index.ts +9 -0
  42. package/packages/react/src/hooks/index.ts +12 -0
  43. package/packages/react/src/hooks/useAI.ts +158 -0
  44. package/packages/react/src/hooks/useAILogs.ts +40 -0
  45. package/packages/react/src/hooks/useAIModels.ts +53 -0
  46. package/packages/react/src/hooks/useChat.ts +141 -0
  47. package/packages/react/src/hooks/useContentManager.ts +108 -0
  48. package/packages/react/src/hooks/useImageGeneration.ts +82 -0
  49. package/packages/react/src/hooks/useMemory.ts +161 -0
  50. package/packages/react/src/hooks/useModelTest.ts +126 -0
  51. package/packages/react/src/hooks/useRealtimeAudio.ts +203 -0
  52. package/packages/react/src/hooks/useSkills.ts +114 -0
  53. package/packages/react/src/hooks/useTextToSpeech.ts +99 -0
  54. package/packages/react/src/hooks/useTranscription.ts +119 -0
  55. package/packages/react/src/hooks/useVideoGeneration.ts +79 -0
  56. package/packages/react/src/index.ts +42 -0
  57. package/packages/react/src/pages/AILogsPage.tsx +98 -0
  58. package/packages/react/src/pages/ChatPage.tsx +42 -0
  59. package/packages/react/src/pages/ModelTestPage.tsx +33 -0
  60. package/packages/react/src/pages/index.ts +5 -0
  61. package/packages/react/tsconfig.json +26 -0
  62. package/packages/react/tsup.config.ts +22 -0
  63. package/packages/react-css/README.md +1 -0
  64. package/packages/react-css/package.json +45 -0
  65. package/packages/react-css/src/ai.css +857 -0
  66. package/packages/react-css/src/components/AILogTable.tsx +90 -0
  67. package/packages/react-css/src/components/ChatWindow.tsx +118 -0
  68. package/packages/react-css/src/components/GenerationCard.tsx +73 -0
  69. package/packages/react-css/src/components/ImageGenerator.tsx +103 -0
  70. package/packages/react-css/src/components/ModelSelector.tsx +44 -0
  71. package/packages/react-css/src/components/ModelTestRunner.tsx +148 -0
  72. package/packages/react-css/src/components/VoiceSelector.tsx +51 -0
  73. package/packages/react-css/src/components/index.ts +9 -0
  74. package/packages/react-css/src/hooks/index.ts +12 -0
  75. package/packages/react-css/src/hooks/useAI.ts +153 -0
  76. package/packages/react-css/src/hooks/useAILogs.ts +40 -0
  77. package/packages/react-css/src/hooks/useAIModels.ts +51 -0
  78. package/packages/react-css/src/hooks/useChat.ts +145 -0
  79. package/packages/react-css/src/hooks/useContentManager.ts +108 -0
  80. package/packages/react-css/src/hooks/useImageGeneration.ts +82 -0
  81. package/packages/react-css/src/hooks/useMemory.ts +161 -0
  82. package/packages/react-css/src/hooks/useModelTest.ts +122 -0
  83. package/packages/react-css/src/hooks/useRealtimeAudio.ts +203 -0
  84. package/packages/react-css/src/hooks/useSkills.ts +114 -0
  85. package/packages/react-css/src/hooks/useTextToSpeech.ts +99 -0
  86. package/packages/react-css/src/hooks/useTranscription.ts +119 -0
  87. package/packages/react-css/src/hooks/useVideoGeneration.ts +79 -0
  88. package/packages/react-css/src/index.ts +35 -0
  89. package/packages/react-css/src/pages/AILogsPage.tsx +98 -0
  90. package/packages/react-css/src/pages/ChatPage.tsx +42 -0
  91. package/packages/react-css/src/pages/ModelTestPage.tsx +33 -0
  92. package/packages/react-css/src/pages/index.ts +5 -0
  93. package/packages/react-css/src/styles.css +127 -0
  94. package/packages/react-css/tsconfig.json +26 -0
  95. package/packages/react-css/tsup.config.ts +2 -0
  96. package/packages/shared/README.md +1 -0
  97. package/packages/shared/package.json +71 -0
  98. package/packages/shared/src/__tests__/ai.test.ts +67 -0
  99. package/packages/shared/src/ai-client.ts +243 -0
  100. package/packages/shared/src/config.ts +235 -0
  101. package/packages/shared/src/content.ts +249 -0
  102. package/packages/shared/src/convex/helpers.ts +163 -0
  103. package/packages/shared/src/convex/index.ts +16 -0
  104. package/packages/shared/src/convex/schemas.ts +146 -0
  105. package/packages/shared/src/convex/validators.ts +136 -0
  106. package/packages/shared/src/index.ts +107 -0
  107. package/packages/shared/src/memory.ts +197 -0
  108. package/packages/shared/src/providers/base.ts +103 -0
  109. package/packages/shared/src/providers/elevenlabs.ts +155 -0
  110. package/packages/shared/src/providers/index.ts +28 -0
  111. package/packages/shared/src/providers/openai-compatible.ts +286 -0
  112. package/packages/shared/src/providers/registry.ts +113 -0
  113. package/packages/shared/src/providers/replicate-fal.ts +230 -0
  114. package/packages/shared/src/skills.ts +273 -0
  115. package/packages/shared/src/types.ts +501 -0
  116. package/packages/shared/tsconfig.json +25 -0
  117. package/packages/shared/tsup.config.ts +22 -0
  118. package/packages/shared/vitest.config.ts +4 -0
  119. package/packages/solidjs/README.md +1 -0
  120. package/packages/solidjs/package.json +59 -0
  121. package/packages/solidjs/src/components/ChatWindow.tsx +78 -0
  122. package/packages/solidjs/src/components/GenerationCard.tsx +62 -0
  123. package/packages/solidjs/src/components/ModelTestRunner.tsx +119 -0
  124. package/packages/solidjs/src/components/index.ts +5 -0
  125. package/packages/solidjs/src/index.ts +32 -0
  126. package/packages/solidjs/src/pages/ChatPage.tsx +22 -0
  127. package/packages/solidjs/src/pages/ModelTestPage.tsx +22 -0
  128. package/packages/solidjs/src/pages/index.ts +4 -0
  129. package/packages/solidjs/src/primitives/createAI.ts +79 -0
  130. package/packages/solidjs/src/primitives/createChat.ts +100 -0
  131. package/packages/solidjs/src/primitives/createContentManager.ts +61 -0
  132. package/packages/solidjs/src/primitives/createImageGeneration.ts +46 -0
  133. package/packages/solidjs/src/primitives/createMemory.ts +127 -0
  134. package/packages/solidjs/src/primitives/createModelTest.ts +89 -0
  135. package/packages/solidjs/src/primitives/createSkills.ts +83 -0
  136. package/packages/solidjs/src/primitives/createTextToSpeech.ts +56 -0
  137. package/packages/solidjs/src/primitives/createVideoGeneration.ts +46 -0
  138. package/packages/solidjs/src/primitives/index.ts +8 -0
  139. package/packages/solidjs/tsconfig.json +27 -0
  140. package/packages/solidjs/tsup.config.ts +21 -0
  141. package/packages/solidjs-css/README.md +1 -0
  142. package/packages/solidjs-css/package.json +44 -0
  143. package/packages/solidjs-css/src/ai.css +857 -0
  144. package/packages/solidjs-css/src/components/ChatWindow.tsx +78 -0
  145. package/packages/solidjs-css/src/components/GenerationCard.tsx +62 -0
  146. package/packages/solidjs-css/src/components/ModelTestRunner.tsx +119 -0
  147. package/packages/solidjs-css/src/components/index.ts +5 -0
  148. package/packages/solidjs-css/src/index.ts +26 -0
  149. package/packages/solidjs-css/src/pages/ChatPage.tsx +22 -0
  150. package/packages/solidjs-css/src/pages/ModelTestPage.tsx +22 -0
  151. package/packages/solidjs-css/src/pages/index.ts +4 -0
  152. package/packages/solidjs-css/src/primitives/createAI.ts +79 -0
  153. package/packages/solidjs-css/src/primitives/createChat.ts +100 -0
  154. package/packages/solidjs-css/src/primitives/createContentManager.ts +61 -0
  155. package/packages/solidjs-css/src/primitives/createImageGeneration.ts +46 -0
  156. package/packages/solidjs-css/src/primitives/createMemory.ts +127 -0
  157. package/packages/solidjs-css/src/primitives/createModelTest.ts +89 -0
  158. package/packages/solidjs-css/src/primitives/createSkills.ts +83 -0
  159. package/packages/solidjs-css/src/primitives/createTextToSpeech.ts +56 -0
  160. package/packages/solidjs-css/src/primitives/createVideoGeneration.ts +46 -0
  161. package/packages/solidjs-css/src/primitives/index.ts +1 -0
  162. package/packages/solidjs-css/src/styles.css +127 -0
  163. package/packages/solidjs-css/tsconfig.json +27 -0
  164. package/packages/solidjs-css/tsup.config.ts +2 -0
  165. package/pnpm-workspace.yaml +2 -0
@@ -0,0 +1,67 @@
1
// Smoke tests for @geenius/ai shared exports: content templates, memory
// helpers, and the skill registry. These assert shape/type contracts only —
// not specific prompt content.
import { describe, it, expect } from 'vitest'
import {
  BUILT_IN_TEMPLATES, buildContentPrompt, buildActionSystemPrompt,
  DEFAULT_MEMORY_CONFIG, buildMemoryContext, extractPreferenceHints,
  BUILT_IN_SKILLS, createSkillRegistry, buildSkillPrompt,
} from '../index'

describe('AI Templates', () => {
  it('BUILT_IN_TEMPLATES is a non-empty array', () => {
    expect(Array.isArray(BUILT_IN_TEMPLATES)).toBe(true)
    expect(BUILT_IN_TEMPLATES.length).toBeGreaterThan(0)
  })

  it('buildContentPrompt returns system and user prompt', () => {
    // Uses the first built-in template; the previous test guarantees one exists.
    const template = BUILT_IN_TEMPLATES[0]
    const result = buildContentPrompt(template, { text: 'Hello world' })
    expect(typeof result).toBe('object')
    expect(typeof result.systemPrompt).toBe('string')
    expect(typeof result.userPrompt).toBe('string')
  })

  it('buildActionSystemPrompt returns a system prompt', () => {
    const prompt = buildActionSystemPrompt('summarize')
    expect(typeof prompt).toBe('string')
    expect(prompt.length).toBeGreaterThan(0)
  })
})

describe('AI Memory', () => {
  it('DEFAULT_MEMORY_CONFIG is defined', () => {
    expect(DEFAULT_MEMORY_CONFIG).toBeDefined()
    expect(typeof DEFAULT_MEMORY_CONFIG).toBe('object')
  })

  it('buildMemoryContext returns string from entries', () => {
    // Empty input is the minimal contract: still a string, never undefined.
    const ctx = buildMemoryContext([])
    expect(typeof ctx).toBe('string')
  })

  it('extractPreferenceHints extracts from text', () => {
    const hints = extractPreferenceHints('I prefer dark mode and concise responses.')
    expect(typeof hints).toBe('object')
  })
})

describe('AI Skills', () => {
  it('BUILT_IN_SKILLS is a non-empty array', () => {
    expect(Array.isArray(BUILT_IN_SKILLS)).toBe(true)
    expect(BUILT_IN_SKILLS.length).toBeGreaterThan(0)
  })

  it('createSkillRegistry returns a registry', () => {
    const registry = createSkillRegistry()
    expect(registry).toBeDefined()
    expect(typeof registry.register).toBe('function')
    expect(typeof registry.get).toBe('function')
    expect(typeof registry.list).toBe('function')
  })

  it('buildSkillPrompt produces system and user prompts', () => {
    // NOTE(review): BUILT_IN_SKILLS is asserted to be an array above, so
    // Object.values(...)[0] is just its first element — plain indexing
    // (BUILT_IN_SKILLS[0]) would be clearer; confirm and simplify.
    const skill = Object.values(BUILT_IN_SKILLS)[0]
    const result = buildSkillPrompt(skill, {})
    expect(typeof result).toBe('object')
    expect(typeof result.systemPrompt).toBe('string')
    expect(typeof result.userPrompt).toBe('string')
  })
})
@@ -0,0 +1,243 @@
1
// @geenius/ai — TanStack AI Evaluation Wrapper
// Multi-provider AI hook for React with streaming support.

/**
 * Unified AI provider abstraction.
 * When TanStack AI reaches beta, this can wrap @tanstack/react-ai.
 * Until then, provides a consistent interface across providers.
 */

/** Providers this client knows endpoints for; 'custom' requires an explicit baseUrl. */
export type AIProvider = 'openai' | 'anthropic' | 'gemini' | 'ollama' | 'custom'

/** Lightweight per-request client config. For the full SDK config object, see AIConfig in types.ts. */
export interface AIClientConfig {
  provider: AIProvider
  // Sent as `x-api-key` for anthropic, `Authorization: Bearer` for all others
  // (see AIClient.getHeaders below).
  apiKey?: string
  // When set, replaces the built-in PROVIDER_ENDPOINTS entry.
  baseUrl?: string
  model?: string
  defaultTemperature?: number
  defaultMaxTokens?: number
}
/** @deprecated Use AIClientConfig — renamed to avoid collision with types.ts AIConfig */
export type AIConfig = AIClientConfig

/** One chat message in provider-neutral form. */
export interface AIClientMessage {
  role: 'system' | 'user' | 'assistant' | 'tool'
  content: string
  // Tool invocations attached to an assistant message.
  toolCalls?: ToolCall[]
  // presumably links a role:'tool' message back to its originating ToolCall.id
  // — confirm against callers.
  toolCallId?: string
}

/** A single tool invocation requested by the model. */
export interface ToolCall {
  id: string
  name: string
  arguments: Record<string, unknown>
}

/** A tool the model may call: JSON-schema parameters plus an optional local handler. */
export interface ToolDefinition {
  name: string
  description: string
  parameters: Record<string, unknown>
  handler?: (args: Record<string, unknown>) => Promise<unknown>
}

/**
 * Callbacks for streaming and lifecycle events.
 * NOTE(review): AIClient.chat in this file never invokes onToken/onThinking —
 * streaming is not implemented here yet; only onToolCall/onComplete/onError fire.
 */
export interface StreamCallbacks {
  onToken?: (token: string) => void
  onThinking?: (thinking: string) => void
  onToolCall?: (toolCall: ToolCall) => Promise<unknown>
  onComplete?: (response: AIResponse) => void
  onError?: (error: Error) => void
}

/** Normalized response returned by AIClient.chat regardless of provider. */
export interface AIResponse {
  content: string
  model: string
  provider: AIProvider
  usage?: { promptTokens: number; completionTokens: number; totalTokens: number }
  toolCalls?: ToolCall[]
  thinking?: string
  finishReason: 'stop' | 'length' | 'tool_calls' | 'error'
}
61
+
62
// ─── Provider Resolution ──────────────────────────────────────

// Default chat endpoint per provider. 'custom' is intentionally empty:
// callers must supply AIClientConfig.baseUrl for it.
const PROVIDER_ENDPOINTS: Record<AIProvider, string> = {
  openai: 'https://api.openai.com/v1/chat/completions',
  anthropic: 'https://api.anthropic.com/v1/messages',
  gemini: 'https://generativelanguage.googleapis.com/v1beta/models',
  ollama: 'http://localhost:11434/api/chat',
  custom: '',
}

// Fallback model per provider, used when neither the request options nor the
// client config name one (see AIClient.chat).
const DEFAULT_MODELS: Record<AIProvider, string> = {
  openai: 'gpt-4o',
  anthropic: 'claude-sonnet-4-20250514',
  gemini: 'gemini-2.0-flash',
  ollama: 'llama3.1',
  custom: '',
}
79
+
80
+ /**
81
+ * Resolve the provider from environment variables.
82
+ */
83
+ export function resolveAIProvider(): AIClientConfig {
84
+ let provider: AIProvider = 'openai'
85
+ let apiKey = ''
86
+
87
+ try {
88
+ // @ts-expect-error — Vite env
89
+ const envProvider = import.meta?.env?.VITE_AI_PROVIDER ?? process?.env?.AI_PROVIDER ?? 'openai'
90
+ provider = envProvider as AIProvider
91
+ // @ts-expect-error — Vite env
92
+ apiKey = import.meta?.env?.VITE_AI_API_KEY ?? process?.env?.AI_API_KEY ?? ''
93
+ } catch {
94
+ // Fallback
95
+ }
96
+
97
+ return {
98
+ provider,
99
+ apiKey,
100
+ model: DEFAULT_MODELS[provider],
101
+ defaultTemperature: 0.7,
102
+ defaultMaxTokens: 4096,
103
+ }
104
+ }
105
+
106
+ // ─── Chat Client ──────────────────────────────────────────────
107
+
108
+ export class AIClient {
109
+ private config: AIClientConfig
110
+
111
+ constructor(config?: Partial<AIClientConfig>) {
112
+ this.config = { ...resolveAIProvider(), ...config }
113
+ }
114
+
115
+ /**
116
+ * Send a chat completion request.
117
+ */
118
+ async chat(
119
+ messages: AIClientMessage[],
120
+ opts?: {
121
+ model?: string
122
+ temperature?: number
123
+ maxTokens?: number
124
+ tools?: ToolDefinition[]
125
+ stream?: boolean
126
+ callbacks?: StreamCallbacks
127
+ }
128
+ ): Promise<AIResponse> {
129
+ const model = opts?.model ?? this.config.model ?? DEFAULT_MODELS[this.config.provider]
130
+ const endpoint = this.config.baseUrl ?? PROVIDER_ENDPOINTS[this.config.provider]
131
+
132
+ try {
133
+ // Format request based on provider
134
+ const body = this.formatRequest(messages, model, opts)
135
+
136
+ const response = await fetch(endpoint, {
137
+ method: 'POST',
138
+ headers: this.getHeaders(),
139
+ body: JSON.stringify(body),
140
+ })
141
+
142
+ if (!response.ok) {
143
+ const err = await response.json().catch(() => ({ error: response.statusText }))
144
+ throw new Error(`AI request failed: ${err.error?.message ?? err.error ?? response.statusText}`)
145
+ }
146
+
147
+ const data = await response.json()
148
+ const result = this.parseResponse(data)
149
+
150
+ // Handle tool calls
151
+ if (result.toolCalls?.length && opts?.callbacks?.onToolCall) {
152
+ for (const tc of result.toolCalls) {
153
+ const toolResult = await opts.callbacks.onToolCall(tc)
154
+ // Could recursively call with tool results
155
+ opts.callbacks.onComplete?.(result)
156
+ }
157
+ }
158
+
159
+ opts?.callbacks?.onComplete?.(result)
160
+ return result
161
+ } catch (err) {
162
+ const error = err instanceof Error ? err : new Error(String(err))
163
+ opts?.callbacks?.onError?.(error)
164
+ throw error
165
+ }
166
+ }
167
+
168
+ private formatRequest(messages: AIClientMessage[], model: string, opts?: { temperature?: number; maxTokens?: number; tools?: ToolDefinition[] }) {
169
+ // OpenAI / Gemini compatible format
170
+ return {
171
+ model,
172
+ messages: messages.map(m => ({ role: m.role, content: m.content })),
173
+ temperature: opts?.temperature ?? this.config.defaultTemperature,
174
+ max_tokens: opts?.maxTokens ?? this.config.defaultMaxTokens,
175
+ ...(opts?.tools?.length ? {
176
+ tools: opts.tools.map(t => ({
177
+ type: 'function',
178
+ function: { name: t.name, description: t.description, parameters: t.parameters },
179
+ })),
180
+ } : {}),
181
+ }
182
+ }
183
+
184
+ private getHeaders(): Record<string, string> {
185
+ const headers: Record<string, string> = { 'Content-Type': 'application/json' }
186
+ if (this.config.apiKey) {
187
+ if (this.config.provider === 'anthropic') {
188
+ headers['x-api-key'] = this.config.apiKey
189
+ headers['anthropic-version'] = '2023-06-01'
190
+ } else {
191
+ headers['Authorization'] = `Bearer ${this.config.apiKey}`
192
+ }
193
+ }
194
+ return headers
195
+ }
196
+
197
+ private parseResponse(data: any): AIResponse {
198
+ // OpenAI format
199
+ if (data.choices) {
200
+ const choice = data.choices[0]
201
+ return {
202
+ content: choice.message?.content ?? '',
203
+ model: data.model,
204
+ provider: this.config.provider,
205
+ usage: data.usage ? {
206
+ promptTokens: data.usage.prompt_tokens,
207
+ completionTokens: data.usage.completion_tokens,
208
+ totalTokens: data.usage.total_tokens,
209
+ } : undefined,
210
+ toolCalls: choice.message?.tool_calls?.map((tc: any) => ({
211
+ id: tc.id, name: tc.function.name, arguments: JSON.parse(tc.function.arguments),
212
+ })),
213
+ finishReason: choice.finish_reason === 'tool_calls' ? 'tool_calls' : choice.finish_reason === 'length' ? 'length' : 'stop',
214
+ }
215
+ }
216
+ // Anthropic format
217
+ if (data.content) {
218
+ const text = data.content.filter((c: any) => c.type === 'text').map((c: any) => c.text).join('')
219
+ return {
220
+ content: text,
221
+ model: data.model,
222
+ provider: this.config.provider,
223
+ usage: data.usage ? {
224
+ promptTokens: data.usage.input_tokens,
225
+ completionTokens: data.usage.output_tokens,
226
+ totalTokens: data.usage.input_tokens + data.usage.output_tokens,
227
+ } : undefined,
228
+ finishReason: data.stop_reason === 'tool_use' ? 'tool_calls' : 'stop',
229
+ }
230
+ }
231
+ return { content: JSON.stringify(data), model: '', provider: this.config.provider, finishReason: 'stop' }
232
+ }
233
+ }
234
+
235
+ // ─── Provider Switching ───────────────────────────────────────
236
+
237
+ /**
238
+ * Create an AI client from environment configuration.
239
+ * Supports switching providers without code changes.
240
+ */
241
+ export function createAI(overrides?: Partial<AIClientConfig>): AIClient {
242
+ return new AIClient(overrides)
243
+ }
@@ -0,0 +1,235 @@
1
// @geenius-ai/shared — src/config.ts
// Built-in provider/model/voice registry plus config helpers.

import type { AIConfig, AIProviderType, AIProviderConfig, AIModel } from './types'

// ============================================================================
// Default Provider Configs
// ============================================================================
// Each entry names the env var holding its API key; keys are never stored here.

const OPENAI_PROVIDER: AIProviderConfig = {
  name: 'OpenAI',
  type: 'openai',
  baseUrl: 'https://api.openai.com/v1',
  apiKeyEnvVar: 'OPENAI_API_KEY',
  isActive: true,
  models: ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo', 'dall-e-3', 'tts-1', 'tts-1-hd', 'whisper-1', 'text-embedding-3-small', 'text-embedding-3-large'],
  defaultModel: 'gpt-4o',
}

const ANTHROPIC_PROVIDER: AIProviderConfig = {
  name: 'Anthropic',
  type: 'anthropic',
  baseUrl: 'https://api.anthropic.com/v1',
  apiKeyEnvVar: 'ANTHROPIC_API_KEY',
  isActive: true,
  models: ['claude-3.5-sonnet', 'claude-3-haiku', 'claude-3-opus'],
  defaultModel: 'claude-3.5-sonnet',
}

const NVIDIA_PROVIDER: AIProviderConfig = {
  name: 'NVIDIA NIM',
  type: 'nvidia',
  baseUrl: 'https://integrate.api.nvidia.com/v1',
  apiKeyEnvVar: 'NVIDIA_API_KEY',
  isActive: true,
  models: ['meta/llama-3.1-70b-instruct', 'meta/llama-3.1-8b-instruct', 'nvidia/llama-3.1-nemotron-70b-instruct', 'stabilityai/stable-diffusion-xl-base-1.0'],
  defaultModel: 'meta/llama-3.1-70b-instruct',
}

const GOOGLE_PROVIDER: AIProviderConfig = {
  name: 'Google AI',
  type: 'google',
  baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
  apiKeyEnvVar: 'GOOGLE_AI_API_KEY',
  isActive: true,
  models: ['gemini-pro', 'gemini-flash', 'gemini-1.5-pro', 'text-embedding-004'],
  defaultModel: 'gemini-pro',
}

// Points at a localhost daemon, hence inactive by default (see isActive).
const OLLAMA_PROVIDER: AIProviderConfig = {
  name: 'Ollama',
  type: 'ollama',
  baseUrl: 'http://localhost:11434/v1',
  apiKeyEnvVar: 'OLLAMA_API_KEY',
  isActive: false,
  models: ['llama3.2:3b', 'qwen2.5:7b', 'mistral:7b'],
  defaultModel: 'llama3.2:3b',
}

const GROQ_PROVIDER: AIProviderConfig = {
  name: 'Groq',
  type: 'groq',
  baseUrl: 'https://api.groq.com/openai/v1',
  apiKeyEnvVar: 'GROQ_API_KEY',
  isActive: true,
  models: ['llama-3.1-70b-versatile', 'mixtral-8x7b-32768', 'whisper-large-v3'],
  defaultModel: 'llama-3.1-70b-versatile',
}

const ELEVENLABS_PROVIDER: AIProviderConfig = {
  name: 'ElevenLabs',
  type: 'elevenlabs',
  baseUrl: 'https://api.elevenlabs.io',
  apiKeyEnvVar: 'ELEVENLABS_API_KEY',
  isActive: true,
  models: ['eleven_multilingual_v2', 'eleven_turbo_v2_5', 'eleven_english_sts_v2'],
  defaultModel: 'eleven_multilingual_v2',
}

const REPLICATE_PROVIDER: AIProviderConfig = {
  name: 'Replicate',
  type: 'replicate',
  baseUrl: 'https://api.replicate.com',
  apiKeyEnvVar: 'REPLICATE_API_KEY',
  isActive: true,
  models: ['stability-ai/sdxl', 'minimax/video-01', 'meta/musicgen', 'black-forest-labs/flux-1.1-pro'],
  defaultModel: 'stability-ai/sdxl',
}

const FAL_PROVIDER: AIProviderConfig = {
  name: 'Fal.ai',
  type: 'fal',
  baseUrl: 'https://queue.fal.run',
  apiKeyEnvVar: 'FAL_API_KEY',
  isActive: true,
  models: ['fal-ai/flux/dev', 'fal-ai/kling-video/v2/master', 'fal-ai/stable-audio'],
  defaultModel: 'fal-ai/flux/dev',
}

// All known providers; Ollama sits last and is the only inactive entry.
export const DEFAULT_PROVIDERS: AIProviderConfig[] = [
  OPENAI_PROVIDER,
  ANTHROPIC_PROVIDER,
  NVIDIA_PROVIDER,
  GOOGLE_PROVIDER,
  GROQ_PROVIDER,
  ELEVENLABS_PROVIDER,
  REPLICATE_PROVIDER,
  FAL_PROVIDER,
  OLLAMA_PROVIDER,
]
110
+
111
// ============================================================================
// Default Model Cost Registry
// ============================================================================

/**
 * Built-in model registry: capabilities plus per-1k-token costs for token
 * models and flat `costPerCall` for generation models (presumably USD —
 * confirm). NOTE(review): these are pricing snapshots; verify against current
 * provider pricing before relying on them for billing.
 */
export const DEFAULT_MODELS: AIModel[] = [
  // --- Text models ---
  { id: 'gpt-4o', provider: 'openai', name: 'GPT-4o', capabilities: ['text', 'vision', 'function-calling', 'streaming', 'structured-output'], inputCostPer1k: 0.005, outputCostPer1k: 0.015, contextWindow: 128000, isActive: true },
  { id: 'gpt-4o-mini', provider: 'openai', name: 'GPT-4o Mini', capabilities: ['text', 'vision', 'function-calling', 'streaming', 'structured-output'], inputCostPer1k: 0.00015, outputCostPer1k: 0.0006, contextWindow: 128000, isActive: true },
  { id: 'claude-3.5-sonnet', provider: 'anthropic', name: 'Claude 3.5 Sonnet', capabilities: ['text', 'vision', 'function-calling', 'streaming'], inputCostPer1k: 0.003, outputCostPer1k: 0.015, contextWindow: 200000, isActive: true },
  { id: 'claude-3-haiku', provider: 'anthropic', name: 'Claude 3 Haiku', capabilities: ['text', 'vision', 'streaming'], inputCostPer1k: 0.00025, outputCostPer1k: 0.00125, contextWindow: 200000, isActive: true },
  { id: 'gemini-pro', provider: 'google', name: 'Gemini Pro', capabilities: ['text', 'vision', 'streaming'], inputCostPer1k: 0.00125, outputCostPer1k: 0.005, contextWindow: 2000000, isActive: true },
  { id: 'gemini-flash', provider: 'google', name: 'Gemini Flash', capabilities: ['text', 'vision', 'streaming'], inputCostPer1k: 0.000075, outputCostPer1k: 0.0003, contextWindow: 1000000, isActive: true },
  { id: 'meta/llama-3.1-70b-instruct', provider: 'nvidia', name: 'Llama 3.1 70B', capabilities: ['text', 'streaming'], inputCostPer1k: 0.00035, outputCostPer1k: 0.0004, contextWindow: 128000, isActive: true },
  { id: 'llama-3.1-70b-versatile', provider: 'groq', name: 'Llama 3.1 70B (Groq)', capabilities: ['text', 'streaming'], inputCostPer1k: 0.00059, outputCostPer1k: 0.00079, contextWindow: 128000, isActive: true },

  // --- Image models --- (priced per call, not per token)
  { id: 'dall-e-3', provider: 'openai', name: 'DALL-E 3', capabilities: ['image-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.04, isActive: true },
  { id: 'stabilityai/stable-diffusion-xl-base-1.0', provider: 'nvidia', name: 'SDXL (NVIDIA)', capabilities: ['image-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.002, isActive: true },
  { id: 'stability-ai/sdxl', provider: 'replicate', name: 'SDXL (Replicate)', capabilities: ['image-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.003, isActive: true },
  { id: 'black-forest-labs/flux-1.1-pro', provider: 'replicate', name: 'Flux 1.1 Pro', capabilities: ['image-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.04, isActive: true },
  { id: 'fal-ai/flux/dev', provider: 'fal', name: 'Flux Dev (Fal)', capabilities: ['image-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.025, isActive: true },

  // --- Audio / TTS models ---
  { id: 'tts-1', provider: 'openai', name: 'OpenAI TTS', capabilities: ['text-to-speech'], inputCostPer1k: 0.015, outputCostPer1k: 0, isActive: true },
  { id: 'tts-1-hd', provider: 'openai', name: 'OpenAI TTS HD', capabilities: ['text-to-speech'], inputCostPer1k: 0.030, outputCostPer1k: 0, isActive: true },
  { id: 'eleven_multilingual_v2', provider: 'elevenlabs', name: 'ElevenLabs Multilingual v2', capabilities: ['text-to-speech', 'voice-cloning'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.0003, isActive: true },
  { id: 'eleven_turbo_v2_5', provider: 'elevenlabs', name: 'ElevenLabs Turbo v2.5', capabilities: ['text-to-speech'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.0002, isActive: true },

  // --- Transcription / ASR models ---
  { id: 'whisper-1', provider: 'openai', name: 'Whisper', capabilities: ['speech-to-text'], inputCostPer1k: 0.006, outputCostPer1k: 0, isActive: true },
  { id: 'whisper-large-v3', provider: 'groq', name: 'Whisper v3 (Groq)', capabilities: ['speech-to-text'], inputCostPer1k: 0.001, outputCostPer1k: 0, isActive: true },

  // --- Video models ---
  { id: 'minimax/video-01', provider: 'replicate', name: 'MiniMax Video', capabilities: ['video-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.25, isActive: true },
  { id: 'fal-ai/kling-video/v2/master', provider: 'fal', name: 'Kling Video v2', capabilities: ['video-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.10, isActive: true },

  // --- Embedding models ---
  { id: 'text-embedding-3-small', provider: 'openai', name: 'Embedding Small', capabilities: ['embedding'], inputCostPer1k: 0.00002, outputCostPer1k: 0, isActive: true },
  { id: 'text-embedding-3-large', provider: 'openai', name: 'Embedding Large', capabilities: ['embedding'], inputCostPer1k: 0.00013, outputCostPer1k: 0, isActive: true },

  // --- Music models ---
  { id: 'meta/musicgen', provider: 'replicate', name: 'MusicGen', capabilities: ['music-generation'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.02, isActive: true },
  { id: 'fal-ai/stable-audio', provider: 'fal', name: 'Stable Audio', capabilities: ['music-generation', 'sound-effects'], inputCostPer1k: 0, outputCostPer1k: 0, costPerCall: 0.015, isActive: true },
]
155
+
156
// ============================================================================
// Default Voices
// ============================================================================

// NOTE(review): only OpenAI's built-in TTS voices are listed even though an
// ElevenLabs provider is configured above — confirm whether ElevenLabs voices
// are resolved dynamically elsewhere.
export const DEFAULT_VOICES = [
  // OpenAI voices
  { id: 'alloy', name: 'Alloy', provider: 'openai' as const, gender: 'neutral' as const },
  { id: 'echo', name: 'Echo', provider: 'openai' as const, gender: 'male' as const },
  { id: 'fable', name: 'Fable', provider: 'openai' as const, gender: 'male' as const },
  { id: 'onyx', name: 'Onyx', provider: 'openai' as const, gender: 'male' as const },
  { id: 'nova', name: 'Nova', provider: 'openai' as const, gender: 'female' as const },
  { id: 'shimmer', name: 'Shimmer', provider: 'openai' as const, gender: 'female' as const },
]
169
+
170
+ // ============================================================================
171
+ // defineAIConfig
172
+ // ============================================================================
173
+
174
+ export function defineAIConfig(overrides: Partial<AIConfig> = {}): AIConfig {
175
+ return {
176
+ defaultProvider: overrides.defaultProvider ?? 'openai',
177
+ defaultModel: overrides.defaultModel ?? 'gpt-4o',
178
+ providers: overrides.providers ?? DEFAULT_PROVIDERS,
179
+ models: overrides.models ?? DEFAULT_MODELS,
180
+ logging: {
181
+ enabled: true,
182
+ maxPromptLength: 2000,
183
+ maxResponseLength: 8000,
184
+ ...overrides.logging,
185
+ },
186
+ retries: {
187
+ maxAttempts: 3,
188
+ retryableStatusCodes: [429, 500, 502, 503, 504],
189
+ backoffMultiplierMs: 1000,
190
+ ...overrides.retries,
191
+ },
192
+ generationDefaults: {
193
+ image: 'dall-e-3',
194
+ audio: 'tts-1',
195
+ video: 'minimax/video-01',
196
+ embedding: 'text-embedding-3-small',
197
+ transcription: 'whisper-1',
198
+ music: 'meta/musicgen',
199
+ ...overrides.generationDefaults,
200
+ },
201
+ }
202
+ }
203
+
204
+ export function mergeAIConfig(base: AIConfig, overrides: Partial<AIConfig>): AIConfig {
205
+ return {
206
+ ...base,
207
+ ...overrides,
208
+ providers: overrides.providers ?? base.providers,
209
+ models: overrides.models ?? base.models,
210
+ logging: { ...base.logging, ...overrides.logging },
211
+ retries: { ...base.retries, ...overrides.retries },
212
+ generationDefaults: { ...base.generationDefaults, ...overrides.generationDefaults },
213
+ }
214
+ }
215
+
216
+ export function findProviderConfig(config: AIConfig, type: AIProviderType): AIProviderConfig | undefined {
217
+ return config.providers.find(p => p.type === type)
218
+ }
219
+
220
+ export function findModelConfig(config: AIConfig, modelId: string): AIModel | undefined {
221
+ return config.models.find(m => m.id === modelId)
222
+ }
223
+
224
+ export function getProviderForModel(config: AIConfig, modelId: string): AIProviderConfig | undefined {
225
+ const model = findModelConfig(config, modelId)
226
+ if (!model) return undefined
227
+ return findProviderConfig(config, model.provider)
228
+ }
229
+
230
+ /**
231
+ * Get all models that support a given capability.
232
+ */
233
+ export function getModelsByCapability(config: AIConfig, capability: string): AIModel[] {
234
+ return config.models.filter(m => m.isActive && m.capabilities.includes(capability as any))
235
+ }