@geenius/ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.changeset/config.json +11 -0
- package/.env.example +2 -0
- package/.github/CODEOWNERS +1 -0
- package/.github/ISSUE_TEMPLATE/bug_report.md +16 -0
- package/.github/ISSUE_TEMPLATE/feature_request.md +11 -0
- package/.github/PULL_REQUEST_TEMPLATE.md +10 -0
- package/.github/dependabot.yml +11 -0
- package/.github/workflows/ci.yml +23 -0
- package/.github/workflows/release.yml +29 -0
- package/.node-version +1 -0
- package/.nvmrc +1 -0
- package/.prettierrc +7 -0
- package/.project/ACCOUNT.yaml +4 -0
- package/.project/IDEAS.yaml +7 -0
- package/.project/PROJECT.yaml +11 -0
- package/.project/ROADMAP.yaml +15 -0
- package/CHANGELOG.md +15 -0
- package/CODE_OF_CONDUCT.md +26 -0
- package/CONTRIBUTING.md +61 -0
- package/LICENSE +21 -0
- package/README.md +1 -0
- package/SECURITY.md +18 -0
- package/SUPPORT.md +14 -0
- package/package.json +75 -0
- package/packages/convex/package.json +42 -0
- package/packages/convex/src/index.ts +8 -0
- package/packages/convex/src/mutations/messages.ts +29 -0
- package/packages/convex/src/queries/messages.ts +24 -0
- package/packages/convex/src/schema.ts +20 -0
- package/packages/convex/tsconfig.json +11 -0
- package/packages/convex/tsup.config.ts +17 -0
- package/packages/react/README.md +1 -0
- package/packages/react/package.json +60 -0
- package/packages/react/src/components/AILogTable.tsx +90 -0
- package/packages/react/src/components/ChatWindow.tsx +118 -0
- package/packages/react/src/components/GenerationCard.tsx +73 -0
- package/packages/react/src/components/ImageGenerator.tsx +103 -0
- package/packages/react/src/components/ModelSelector.tsx +44 -0
- package/packages/react/src/components/ModelTestRunner.tsx +148 -0
- package/packages/react/src/components/VoiceSelector.tsx +51 -0
- package/packages/react/src/components/index.ts +9 -0
- package/packages/react/src/hooks/index.ts +12 -0
- package/packages/react/src/hooks/useAI.ts +158 -0
- package/packages/react/src/hooks/useAILogs.ts +40 -0
- package/packages/react/src/hooks/useAIModels.ts +53 -0
- package/packages/react/src/hooks/useChat.ts +141 -0
- package/packages/react/src/hooks/useContentManager.ts +108 -0
- package/packages/react/src/hooks/useImageGeneration.ts +82 -0
- package/packages/react/src/hooks/useMemory.ts +161 -0
- package/packages/react/src/hooks/useModelTest.ts +126 -0
- package/packages/react/src/hooks/useRealtimeAudio.ts +203 -0
- package/packages/react/src/hooks/useSkills.ts +114 -0
- package/packages/react/src/hooks/useTextToSpeech.ts +99 -0
- package/packages/react/src/hooks/useTranscription.ts +119 -0
- package/packages/react/src/hooks/useVideoGeneration.ts +79 -0
- package/packages/react/src/index.ts +42 -0
- package/packages/react/src/pages/AILogsPage.tsx +98 -0
- package/packages/react/src/pages/ChatPage.tsx +42 -0
- package/packages/react/src/pages/ModelTestPage.tsx +33 -0
- package/packages/react/src/pages/index.ts +5 -0
- package/packages/react/tsconfig.json +26 -0
- package/packages/react/tsup.config.ts +22 -0
- package/packages/react-css/README.md +1 -0
- package/packages/react-css/package.json +45 -0
- package/packages/react-css/src/ai.css +857 -0
- package/packages/react-css/src/components/AILogTable.tsx +90 -0
- package/packages/react-css/src/components/ChatWindow.tsx +118 -0
- package/packages/react-css/src/components/GenerationCard.tsx +73 -0
- package/packages/react-css/src/components/ImageGenerator.tsx +103 -0
- package/packages/react-css/src/components/ModelSelector.tsx +44 -0
- package/packages/react-css/src/components/ModelTestRunner.tsx +148 -0
- package/packages/react-css/src/components/VoiceSelector.tsx +51 -0
- package/packages/react-css/src/components/index.ts +9 -0
- package/packages/react-css/src/hooks/index.ts +12 -0
- package/packages/react-css/src/hooks/useAI.ts +153 -0
- package/packages/react-css/src/hooks/useAILogs.ts +40 -0
- package/packages/react-css/src/hooks/useAIModels.ts +51 -0
- package/packages/react-css/src/hooks/useChat.ts +145 -0
- package/packages/react-css/src/hooks/useContentManager.ts +108 -0
- package/packages/react-css/src/hooks/useImageGeneration.ts +82 -0
- package/packages/react-css/src/hooks/useMemory.ts +161 -0
- package/packages/react-css/src/hooks/useModelTest.ts +122 -0
- package/packages/react-css/src/hooks/useRealtimeAudio.ts +203 -0
- package/packages/react-css/src/hooks/useSkills.ts +114 -0
- package/packages/react-css/src/hooks/useTextToSpeech.ts +99 -0
- package/packages/react-css/src/hooks/useTranscription.ts +119 -0
- package/packages/react-css/src/hooks/useVideoGeneration.ts +79 -0
- package/packages/react-css/src/index.ts +35 -0
- package/packages/react-css/src/pages/AILogsPage.tsx +98 -0
- package/packages/react-css/src/pages/ChatPage.tsx +42 -0
- package/packages/react-css/src/pages/ModelTestPage.tsx +33 -0
- package/packages/react-css/src/pages/index.ts +5 -0
- package/packages/react-css/src/styles.css +127 -0
- package/packages/react-css/tsconfig.json +26 -0
- package/packages/react-css/tsup.config.ts +2 -0
- package/packages/shared/README.md +1 -0
- package/packages/shared/package.json +71 -0
- package/packages/shared/src/__tests__/ai.test.ts +67 -0
- package/packages/shared/src/ai-client.ts +243 -0
- package/packages/shared/src/config.ts +235 -0
- package/packages/shared/src/content.ts +249 -0
- package/packages/shared/src/convex/helpers.ts +163 -0
- package/packages/shared/src/convex/index.ts +16 -0
- package/packages/shared/src/convex/schemas.ts +146 -0
- package/packages/shared/src/convex/validators.ts +136 -0
- package/packages/shared/src/index.ts +107 -0
- package/packages/shared/src/memory.ts +197 -0
- package/packages/shared/src/providers/base.ts +103 -0
- package/packages/shared/src/providers/elevenlabs.ts +155 -0
- package/packages/shared/src/providers/index.ts +28 -0
- package/packages/shared/src/providers/openai-compatible.ts +286 -0
- package/packages/shared/src/providers/registry.ts +113 -0
- package/packages/shared/src/providers/replicate-fal.ts +230 -0
- package/packages/shared/src/skills.ts +273 -0
- package/packages/shared/src/types.ts +501 -0
- package/packages/shared/tsconfig.json +25 -0
- package/packages/shared/tsup.config.ts +22 -0
- package/packages/shared/vitest.config.ts +4 -0
- package/packages/solidjs/README.md +1 -0
- package/packages/solidjs/package.json +59 -0
- package/packages/solidjs/src/components/ChatWindow.tsx +78 -0
- package/packages/solidjs/src/components/GenerationCard.tsx +62 -0
- package/packages/solidjs/src/components/ModelTestRunner.tsx +119 -0
- package/packages/solidjs/src/components/index.ts +5 -0
- package/packages/solidjs/src/index.ts +32 -0
- package/packages/solidjs/src/pages/ChatPage.tsx +22 -0
- package/packages/solidjs/src/pages/ModelTestPage.tsx +22 -0
- package/packages/solidjs/src/pages/index.ts +4 -0
- package/packages/solidjs/src/primitives/createAI.ts +79 -0
- package/packages/solidjs/src/primitives/createChat.ts +100 -0
- package/packages/solidjs/src/primitives/createContentManager.ts +61 -0
- package/packages/solidjs/src/primitives/createImageGeneration.ts +46 -0
- package/packages/solidjs/src/primitives/createMemory.ts +127 -0
- package/packages/solidjs/src/primitives/createModelTest.ts +89 -0
- package/packages/solidjs/src/primitives/createSkills.ts +83 -0
- package/packages/solidjs/src/primitives/createTextToSpeech.ts +56 -0
- package/packages/solidjs/src/primitives/createVideoGeneration.ts +46 -0
- package/packages/solidjs/src/primitives/index.ts +8 -0
- package/packages/solidjs/tsconfig.json +27 -0
- package/packages/solidjs/tsup.config.ts +21 -0
- package/packages/solidjs-css/README.md +1 -0
- package/packages/solidjs-css/package.json +44 -0
- package/packages/solidjs-css/src/ai.css +857 -0
- package/packages/solidjs-css/src/components/ChatWindow.tsx +78 -0
- package/packages/solidjs-css/src/components/GenerationCard.tsx +62 -0
- package/packages/solidjs-css/src/components/ModelTestRunner.tsx +119 -0
- package/packages/solidjs-css/src/components/index.ts +5 -0
- package/packages/solidjs-css/src/index.ts +26 -0
- package/packages/solidjs-css/src/pages/ChatPage.tsx +22 -0
- package/packages/solidjs-css/src/pages/ModelTestPage.tsx +22 -0
- package/packages/solidjs-css/src/pages/index.ts +4 -0
- package/packages/solidjs-css/src/primitives/createAI.ts +79 -0
- package/packages/solidjs-css/src/primitives/createChat.ts +100 -0
- package/packages/solidjs-css/src/primitives/createContentManager.ts +61 -0
- package/packages/solidjs-css/src/primitives/createImageGeneration.ts +46 -0
- package/packages/solidjs-css/src/primitives/createMemory.ts +127 -0
- package/packages/solidjs-css/src/primitives/createModelTest.ts +89 -0
- package/packages/solidjs-css/src/primitives/createSkills.ts +83 -0
- package/packages/solidjs-css/src/primitives/createTextToSpeech.ts +56 -0
- package/packages/solidjs-css/src/primitives/createVideoGeneration.ts +46 -0
- package/packages/solidjs-css/src/primitives/index.ts +1 -0
- package/packages/solidjs-css/src/styles.css +127 -0
- package/packages/solidjs-css/tsconfig.json +27 -0
- package/packages/solidjs-css/tsup.config.ts +2 -0
- package/pnpm-workspace.yaml +2 -0
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
// @geenius-ai/shared — src/providers/index.ts
|
|
2
|
+
|
|
3
|
+
export type { AIProviderInterface } from './base'
|
|
4
|
+
export { extractPromptText } from './base'
|
|
5
|
+
|
|
6
|
+
export {
|
|
7
|
+
OpenAICompatibleProvider,
|
|
8
|
+
type OpenAICompatibleConfig,
|
|
9
|
+
} from './openai-compatible'
|
|
10
|
+
|
|
11
|
+
export {
|
|
12
|
+
ElevenLabsProvider,
|
|
13
|
+
type ElevenLabsConfig,
|
|
14
|
+
} from './elevenlabs'
|
|
15
|
+
|
|
16
|
+
export {
|
|
17
|
+
ReplicateProvider,
|
|
18
|
+
type ReplicateConfig,
|
|
19
|
+
FalProvider,
|
|
20
|
+
type FalConfig,
|
|
21
|
+
} from './replicate-fal'
|
|
22
|
+
|
|
23
|
+
export {
|
|
24
|
+
createProvider,
|
|
25
|
+
resolveProviderForModel,
|
|
26
|
+
generateTextWithRetries,
|
|
27
|
+
clearProviderCache,
|
|
28
|
+
} from './registry'
|
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
// @geenius-ai/shared — src/providers/openai-compatible.ts
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* OpenAI-compatible provider — works with OpenAI, NVIDIA NIM, Groq,
|
|
5
|
+
* Together, Ollama, and any API that follows the OpenAI chat completions spec.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import type {
|
|
9
|
+
AIProviderType,
|
|
10
|
+
AIGenerationResult,
|
|
11
|
+
AIEmbeddingResult,
|
|
12
|
+
AIGenerateTextOptions,
|
|
13
|
+
AIStructuredOutputOptions,
|
|
14
|
+
AIGenerateImageOptions,
|
|
15
|
+
AIEditImageOptions,
|
|
16
|
+
AIGenerateAudioOptions,
|
|
17
|
+
AITranscribeOptions,
|
|
18
|
+
AIGenerateVideoOptions,
|
|
19
|
+
AIEmbeddingOptions,
|
|
20
|
+
} from '../types'
|
|
21
|
+
import type { AIProviderInterface } from './base'
|
|
22
|
+
import { extractPromptText } from './base'
|
|
23
|
+
|
|
24
|
+
/** Configuration for any endpoint that follows the OpenAI REST API shape. */
export interface OpenAICompatibleConfig {
  /** Provider discriminator recorded on every generation result. */
  type: AIProviderType
  /** Human-readable provider name; used as the prefix of error messages. */
  name: string
  /** API root, e.g. https://api.openai.com/v1 — a trailing slash is stripped. */
  baseUrl: string
  /** Secret sent as `Authorization: Bearer <apiKey>` on every request. */
  apiKey: string
  /** Model used when a request does not specify one; falls back to 'gpt-4o'. */
  defaultModel?: string
  /** Extra headers merged into every request (they override the defaults). */
  headers?: Record<string, string>
}
|
|
32
|
+
|
|
33
|
+
export class OpenAICompatibleProvider implements AIProviderInterface {
|
|
34
|
+
type: AIProviderType
|
|
35
|
+
name: string
|
|
36
|
+
private baseUrl: string
|
|
37
|
+
private apiKey: string
|
|
38
|
+
private defaultModel: string
|
|
39
|
+
private extraHeaders: Record<string, string>
|
|
40
|
+
|
|
41
|
+
constructor(config: OpenAICompatibleConfig) {
|
|
42
|
+
this.type = config.type
|
|
43
|
+
this.name = config.name
|
|
44
|
+
this.baseUrl = config.baseUrl.replace(/\/$/, '')
|
|
45
|
+
this.apiKey = config.apiKey
|
|
46
|
+
this.defaultModel = config.defaultModel ?? 'gpt-4o'
|
|
47
|
+
this.extraHeaders = config.headers ?? {}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
async generateText(options: AIGenerateTextOptions): Promise<AIGenerationResult> {
|
|
51
|
+
const model = options.model || this.defaultModel
|
|
52
|
+
const startTime = Date.now()
|
|
53
|
+
|
|
54
|
+
const body: Record<string, unknown> = {
|
|
55
|
+
model,
|
|
56
|
+
messages: options.messages,
|
|
57
|
+
temperature: options.temperature ?? 0.7,
|
|
58
|
+
max_tokens: options.maxTokens ?? 2048,
|
|
59
|
+
top_p: options.topP ?? 0.9,
|
|
60
|
+
stream: false,
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
if (options.responseFormat) {
|
|
64
|
+
body.response_format = options.responseFormat
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
const response = await fetch(`${this.baseUrl}/chat/completions`, {
|
|
68
|
+
method: 'POST',
|
|
69
|
+
headers: {
|
|
70
|
+
'Content-Type': 'application/json',
|
|
71
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
72
|
+
...this.extraHeaders,
|
|
73
|
+
},
|
|
74
|
+
body: JSON.stringify(body),
|
|
75
|
+
})
|
|
76
|
+
|
|
77
|
+
if (!response.ok) {
|
|
78
|
+
const errorText = await response.text()
|
|
79
|
+
throw new Error(`${this.name} API error (${response.status}): ${errorText}`)
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
const data = await response.json() as any
|
|
83
|
+
const durationMs = Date.now() - startTime
|
|
84
|
+
|
|
85
|
+
return {
|
|
86
|
+
content: data.choices?.[0]?.message?.content ?? '',
|
|
87
|
+
model,
|
|
88
|
+
provider: this.type,
|
|
89
|
+
type: 'text',
|
|
90
|
+
tokens: {
|
|
91
|
+
prompt: data.usage?.prompt_tokens ?? 0,
|
|
92
|
+
completion: data.usage?.completion_tokens ?? 0,
|
|
93
|
+
total: data.usage?.total_tokens ?? 0,
|
|
94
|
+
},
|
|
95
|
+
durationMs,
|
|
96
|
+
finishReason: data.choices?.[0]?.finish_reason,
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
async generateImage(options: AIGenerateImageOptions): Promise<string> {
|
|
101
|
+
const response = await fetch(`${this.baseUrl}/images/generations`, {
|
|
102
|
+
method: 'POST',
|
|
103
|
+
headers: {
|
|
104
|
+
'Content-Type': 'application/json',
|
|
105
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
106
|
+
'Accept': 'application/json',
|
|
107
|
+
...this.extraHeaders,
|
|
108
|
+
},
|
|
109
|
+
body: JSON.stringify({
|
|
110
|
+
model: options.model ?? 'dall-e-3',
|
|
111
|
+
prompt: options.prompt.substring(0, 4000),
|
|
112
|
+
n: options.n ?? 1,
|
|
113
|
+
response_format: options.responseFormat ?? 'b64_json',
|
|
114
|
+
size: options.size ?? '1024x1024',
|
|
115
|
+
}),
|
|
116
|
+
})
|
|
117
|
+
|
|
118
|
+
if (!response.ok) {
|
|
119
|
+
const errorText = await response.text()
|
|
120
|
+
throw new Error(`${this.name} Image API error (${response.status}): ${errorText}`)
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
const data = await response.json() as any
|
|
124
|
+
return data.data?.[0]?.b64_json ?? data.data?.[0]?.url ?? ''
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
async generateAudio(options: AIGenerateAudioOptions): Promise<string> {
|
|
128
|
+
const response = await fetch(`${this.baseUrl}/audio/speech`, {
|
|
129
|
+
method: 'POST',
|
|
130
|
+
headers: {
|
|
131
|
+
'Content-Type': 'application/json',
|
|
132
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
133
|
+
...this.extraHeaders,
|
|
134
|
+
},
|
|
135
|
+
body: JSON.stringify({
|
|
136
|
+
model: options.model ?? 'tts-1',
|
|
137
|
+
input: options.prompt,
|
|
138
|
+
voice: options.voice ?? 'alloy',
|
|
139
|
+
speed: options.speed ?? 1.0,
|
|
140
|
+
response_format: options.responseFormat ?? 'mp3',
|
|
141
|
+
}),
|
|
142
|
+
})
|
|
143
|
+
|
|
144
|
+
if (!response.ok) {
|
|
145
|
+
const errorText = await response.text()
|
|
146
|
+
throw new Error(`${this.name} Audio API error (${response.status}): ${errorText}`)
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
const arrayBuffer = await response.arrayBuffer()
|
|
150
|
+
const bytes = new Uint8Array(arrayBuffer)
|
|
151
|
+
let binary = ''
|
|
152
|
+
for (let i = 0; i < bytes.length; i++) {
|
|
153
|
+
binary += String.fromCharCode(bytes[i]!)
|
|
154
|
+
}
|
|
155
|
+
return btoa(binary)
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
async transcribeAudio(options: AITranscribeOptions): Promise<string> {
|
|
159
|
+
// Convert base64 to blob for multipart upload
|
|
160
|
+
const binaryStr = atob(options.audio)
|
|
161
|
+
const bytes = new Uint8Array(binaryStr.length)
|
|
162
|
+
for (let i = 0; i < binaryStr.length; i++) {
|
|
163
|
+
bytes[i] = binaryStr.charCodeAt(i)
|
|
164
|
+
}
|
|
165
|
+
const blob = new Blob([bytes], { type: 'audio/mp3' })
|
|
166
|
+
|
|
167
|
+
const formData = new FormData()
|
|
168
|
+
formData.append('file', blob, 'audio.mp3')
|
|
169
|
+
formData.append('model', options.model ?? 'whisper-1')
|
|
170
|
+
if (options.language) formData.append('language', options.language)
|
|
171
|
+
|
|
172
|
+
const response = await fetch(`${this.baseUrl}/audio/transcriptions`, {
|
|
173
|
+
method: 'POST',
|
|
174
|
+
headers: {
|
|
175
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
176
|
+
...this.extraHeaders,
|
|
177
|
+
},
|
|
178
|
+
body: formData,
|
|
179
|
+
})
|
|
180
|
+
|
|
181
|
+
if (!response.ok) {
|
|
182
|
+
const errorText = await response.text()
|
|
183
|
+
throw new Error(`${this.name} Transcription API error (${response.status}): ${errorText}`)
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
const data = await response.json() as any
|
|
187
|
+
return data.text ?? ''
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
async generateVideo(_options: AIGenerateVideoOptions): Promise<string> {
|
|
191
|
+
throw new Error(`${this.name}: Video generation not supported via OpenAI-compatible API. Use ReplicateProvider or FalProvider.`)
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
async generateEmbeddings(options: AIEmbeddingOptions): Promise<AIEmbeddingResult> {
|
|
195
|
+
const startTime = Date.now()
|
|
196
|
+
const response = await fetch(`${this.baseUrl}/embeddings`, {
|
|
197
|
+
method: 'POST',
|
|
198
|
+
headers: {
|
|
199
|
+
'Content-Type': 'application/json',
|
|
200
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
201
|
+
...this.extraHeaders,
|
|
202
|
+
},
|
|
203
|
+
body: JSON.stringify({
|
|
204
|
+
model: options.model ?? 'text-embedding-3-small',
|
|
205
|
+
input: options.input,
|
|
206
|
+
dimensions: options.dimensions,
|
|
207
|
+
encoding_format: options.encodingFormat ?? 'float',
|
|
208
|
+
}),
|
|
209
|
+
})
|
|
210
|
+
|
|
211
|
+
if (!response.ok) {
|
|
212
|
+
const errorText = await response.text()
|
|
213
|
+
throw new Error(`${this.name} Embeddings API error (${response.status}): ${errorText}`)
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
const data = await response.json() as any
|
|
217
|
+
return {
|
|
218
|
+
embeddings: (data.data ?? []).map((d: any) => d.embedding),
|
|
219
|
+
model: options.model ?? 'text-embedding-3-small',
|
|
220
|
+
provider: this.type,
|
|
221
|
+
tokens: data.usage?.total_tokens ?? 0,
|
|
222
|
+
durationMs: Date.now() - startTime,
|
|
223
|
+
}
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
async generateStructuredOutput(options: AIStructuredOutputOptions): Promise<AIGenerationResult> {
|
|
227
|
+
return this.generateText({
|
|
228
|
+
model: options.model,
|
|
229
|
+
messages: options.messages,
|
|
230
|
+
temperature: options.temperature,
|
|
231
|
+
maxTokens: options.maxTokens,
|
|
232
|
+
caller: options.caller,
|
|
233
|
+
responseFormat: {
|
|
234
|
+
type: 'json_schema',
|
|
235
|
+
json_schema: {
|
|
236
|
+
name: options.schemaName ?? 'response',
|
|
237
|
+
strict: true,
|
|
238
|
+
schema: options.schema,
|
|
239
|
+
},
|
|
240
|
+
},
|
|
241
|
+
})
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
async editImage(options: AIEditImageOptions): Promise<string> {
|
|
245
|
+
const formData = new FormData()
|
|
246
|
+
|
|
247
|
+
// Convert base64 to blob
|
|
248
|
+
const imageBinary = atob(options.image)
|
|
249
|
+
const imageBytes = new Uint8Array(imageBinary.length)
|
|
250
|
+
for (let i = 0; i < imageBinary.length; i++) {
|
|
251
|
+
imageBytes[i] = imageBinary.charCodeAt(i)
|
|
252
|
+
}
|
|
253
|
+
formData.append('image', new Blob([imageBytes], { type: 'image/png' }), 'image.png')
|
|
254
|
+
|
|
255
|
+
if (options.maskImage) {
|
|
256
|
+
const maskBinary = atob(options.maskImage)
|
|
257
|
+
const maskBytes = new Uint8Array(maskBinary.length)
|
|
258
|
+
for (let i = 0; i < maskBinary.length; i++) {
|
|
259
|
+
maskBytes[i] = maskBinary.charCodeAt(i)
|
|
260
|
+
}
|
|
261
|
+
formData.append('mask', new Blob([maskBytes], { type: 'image/png' }), 'mask.png')
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
formData.append('prompt', options.prompt)
|
|
265
|
+
formData.append('model', options.model ?? 'dall-e-2')
|
|
266
|
+
formData.append('n', String(options.n ?? 1))
|
|
267
|
+
if (options.size) formData.append('size', options.size)
|
|
268
|
+
|
|
269
|
+
const response = await fetch(`${this.baseUrl}/images/edits`, {
|
|
270
|
+
method: 'POST',
|
|
271
|
+
headers: {
|
|
272
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
273
|
+
...this.extraHeaders,
|
|
274
|
+
},
|
|
275
|
+
body: formData,
|
|
276
|
+
})
|
|
277
|
+
|
|
278
|
+
if (!response.ok) {
|
|
279
|
+
const errorText = await response.text()
|
|
280
|
+
throw new Error(`${this.name} Image Edit API error (${response.status}): ${errorText}`)
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
const data = await response.json() as any
|
|
284
|
+
return data.data?.[0]?.b64_json ?? data.data?.[0]?.url ?? ''
|
|
285
|
+
}
|
|
286
|
+
}
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
// @geenius-ai/shared — src/providers/registry.ts
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Provider registry — creates and caches provider instances.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import type { AIProviderConfig, AIProviderType, AIConfig, AIGenerationResult, AIGenerateTextOptions } from '../types'
|
|
8
|
+
import type { AIProviderInterface } from './base'
|
|
9
|
+
import { extractPromptText } from './base'
|
|
10
|
+
import { OpenAICompatibleProvider } from './openai-compatible'
|
|
11
|
+
|
|
12
|
+
const providerCache = new Map<string, AIProviderInterface>()
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Create a provider instance from config.
|
|
16
|
+
* OpenAI, NVIDIA, Groq, Together, and Ollama all use OpenAI-compatible APIs.
|
|
17
|
+
*/
|
|
18
|
+
export function createProvider(config: AIProviderConfig, apiKey: string): AIProviderInterface {
|
|
19
|
+
const cacheKey = `${config.type}:${config.baseUrl}`
|
|
20
|
+
const cached = providerCache.get(cacheKey)
|
|
21
|
+
if (cached) return cached
|
|
22
|
+
|
|
23
|
+
const provider = new OpenAICompatibleProvider({
|
|
24
|
+
type: config.type,
|
|
25
|
+
name: config.name,
|
|
26
|
+
baseUrl: config.baseUrl,
|
|
27
|
+
apiKey,
|
|
28
|
+
defaultModel: config.defaultModel,
|
|
29
|
+
headers: config.headers,
|
|
30
|
+
})
|
|
31
|
+
|
|
32
|
+
providerCache.set(cacheKey, provider)
|
|
33
|
+
return provider
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Resolve a provider for a given model from the AI config.
|
|
38
|
+
* Looks up the model's provider type, finds the config, and creates the provider.
|
|
39
|
+
*/
|
|
40
|
+
export function resolveProviderForModel(
|
|
41
|
+
aiConfig: AIConfig,
|
|
42
|
+
modelId: string,
|
|
43
|
+
getApiKey: (envVar: string) => string | undefined,
|
|
44
|
+
): AIProviderInterface {
|
|
45
|
+
const model = aiConfig.models.find(m => m.id === modelId)
|
|
46
|
+
const providerType = model?.provider ?? aiConfig.defaultProvider
|
|
47
|
+
const providerConfig = aiConfig.providers.find(p => p.type === providerType)
|
|
48
|
+
|
|
49
|
+
if (!providerConfig) {
|
|
50
|
+
throw new Error(`No provider config found for type "${providerType}"`)
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
const apiKey = getApiKey(providerConfig.apiKeyEnvVar)
|
|
54
|
+
if (!apiKey) {
|
|
55
|
+
throw new Error(`API key not found for ${providerConfig.name} (env: ${providerConfig.apiKeyEnvVar})`)
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
return createProvider(providerConfig, apiKey)
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
/**
|
|
62
|
+
* Generate text with automatic retries and logging support.
|
|
63
|
+
* This is the main entry point used by Convex actions.
|
|
64
|
+
*/
|
|
65
|
+
export async function generateTextWithRetries(
|
|
66
|
+
provider: AIProviderInterface,
|
|
67
|
+
options: AIGenerateTextOptions,
|
|
68
|
+
retryConfig: AIConfig['retries'],
|
|
69
|
+
): Promise<AIGenerationResult> {
|
|
70
|
+
let lastError: Error | null = null
|
|
71
|
+
const { maxAttempts, retryableStatusCodes, backoffMultiplierMs } = retryConfig
|
|
72
|
+
|
|
73
|
+
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
|
|
74
|
+
try {
|
|
75
|
+
const result = await provider.generateText(options)
|
|
76
|
+
|
|
77
|
+
if (!result.content) {
|
|
78
|
+
throw new Error(`Empty response from ${provider.name} (finish: ${result.finishReason})`)
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
if (attempt > 1) {
|
|
82
|
+
console.log(`[geenius-ai] Succeeded on attempt ${attempt}/${maxAttempts}`)
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
return result
|
|
86
|
+
} catch (err) {
|
|
87
|
+
lastError = err instanceof Error ? err : new Error('Unknown error')
|
|
88
|
+
|
|
89
|
+
// Check if the error contains a retryable status code
|
|
90
|
+
const isRetryable = retryableStatusCodes.some(code =>
|
|
91
|
+
lastError!.message.includes(`(${code})`)
|
|
92
|
+
)
|
|
93
|
+
|
|
94
|
+
if (isRetryable && attempt < maxAttempts) {
|
|
95
|
+
const delay = Math.pow(2, attempt - 1) * backoffMultiplierMs
|
|
96
|
+
console.log(`[geenius-ai] Attempt ${attempt}/${maxAttempts} failed. Retrying in ${delay}ms...`)
|
|
97
|
+
await new Promise(resolve => setTimeout(resolve, delay))
|
|
98
|
+
continue
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
throw lastError
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
throw lastError || new Error('All retry attempts exhausted')
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
/**
 * Clear the provider cache (useful for testing or config changes).
 * After this, the next createProvider() call constructs a fresh instance
 * instead of returning a cached one.
 */
export function clearProviderCache() {
  providerCache.clear()
}
|
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
// @geenius-ai/shared — src/providers/replicate-fal.ts
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Replicate / Fal.ai provider — for video generation, music,
|
|
5
|
+
* and specialized models (Runway, Kling, Suno, etc.)
|
|
6
|
+
*
|
|
7
|
+
* Both use a prediction/submit-and-poll pattern.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import type {
|
|
11
|
+
AIProviderType,
|
|
12
|
+
AIGenerationResult,
|
|
13
|
+
AIGenerateTextOptions,
|
|
14
|
+
AIGenerateVideoOptions,
|
|
15
|
+
AIGenerateImageOptions,
|
|
16
|
+
AIGenerateMusicOptions,
|
|
17
|
+
} from '../types'
|
|
18
|
+
import type { AIProviderInterface } from './base'
|
|
19
|
+
|
|
20
|
+
// ============================================================================
|
|
21
|
+
// Replicate
|
|
22
|
+
// ============================================================================
|
|
23
|
+
|
|
24
|
+
/** Configuration for the Replicate prediction client. */
export interface ReplicateConfig {
  /** Replicate API token, sent as `Authorization: Bearer <apiKey>`. */
  apiKey: string
  /** API origin override; defaults to https://api.replicate.com. */
  baseUrl?: string
  /** Max poll time in ms before timeout */
  maxWaitMs?: number
}
|
|
30
|
+
|
|
31
|
+
export class ReplicateProvider implements AIProviderInterface {
|
|
32
|
+
type: AIProviderType = 'replicate'
|
|
33
|
+
name = 'Replicate'
|
|
34
|
+
private apiKey: string
|
|
35
|
+
private baseUrl: string
|
|
36
|
+
private maxWaitMs: number
|
|
37
|
+
|
|
38
|
+
constructor(config: ReplicateConfig) {
|
|
39
|
+
this.apiKey = config.apiKey
|
|
40
|
+
this.baseUrl = (config.baseUrl ?? 'https://api.replicate.com').replace(/\/$/, '')
|
|
41
|
+
this.maxWaitMs = config.maxWaitMs ?? 300_000 // 5 min default
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
async generateText(_options: AIGenerateTextOptions): Promise<AIGenerationResult> {
|
|
45
|
+
throw new Error('Use Replicate for image/video/music generation, not text.')
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
async generateImage(options: AIGenerateImageOptions): Promise<string> {
|
|
49
|
+
const model = options.model ?? 'stability-ai/sdxl'
|
|
50
|
+
return await this.runPrediction(model, {
|
|
51
|
+
prompt: options.prompt,
|
|
52
|
+
negative_prompt: options.negativePrompt,
|
|
53
|
+
width: parseInt(options.size?.split('x')[0] ?? '1024'),
|
|
54
|
+
height: parseInt(options.size?.split('x')[1] ?? '1024'),
|
|
55
|
+
num_outputs: options.n ?? 1,
|
|
56
|
+
seed: options.seed,
|
|
57
|
+
})
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
async generateVideo(options: AIGenerateVideoOptions): Promise<string> {
|
|
61
|
+
const model = options.model ?? 'minimax/video-01'
|
|
62
|
+
const input: Record<string, unknown> = {
|
|
63
|
+
prompt: options.prompt,
|
|
64
|
+
}
|
|
65
|
+
if (options.startImage) input.first_frame_image = options.startImage
|
|
66
|
+
if (options.endImage) input.last_frame_image = options.endImage
|
|
67
|
+
|
|
68
|
+
return await this.runPrediction(model, input)
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
async generateMusic(options: AIGenerateMusicOptions): Promise<string> {
|
|
72
|
+
const model = options.model ?? 'meta/musicgen'
|
|
73
|
+
return await this.runPrediction(model, {
|
|
74
|
+
prompt: options.prompt,
|
|
75
|
+
duration: options.duration ?? 10,
|
|
76
|
+
})
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
/** Core submit-and-poll logic */
|
|
80
|
+
private async runPrediction(model: string, input: Record<string, unknown>): Promise<string> {
|
|
81
|
+
// Submit prediction
|
|
82
|
+
const createResponse = await fetch(`${this.baseUrl}/v1/predictions`, {
|
|
83
|
+
method: 'POST',
|
|
84
|
+
headers: {
|
|
85
|
+
'Content-Type': 'application/json',
|
|
86
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
87
|
+
'Prefer': 'wait',
|
|
88
|
+
},
|
|
89
|
+
body: JSON.stringify({
|
|
90
|
+
model,
|
|
91
|
+
input,
|
|
92
|
+
}),
|
|
93
|
+
})
|
|
94
|
+
|
|
95
|
+
if (!createResponse.ok) {
|
|
96
|
+
const errorText = await createResponse.text()
|
|
97
|
+
throw new Error(`Replicate error (${createResponse.status}): ${errorText}`)
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
let prediction = await createResponse.json() as any
|
|
101
|
+
|
|
102
|
+
// Poll for completion if not already done
|
|
103
|
+
const startTime = Date.now()
|
|
104
|
+
while (prediction.status !== 'succeeded' && prediction.status !== 'failed') {
|
|
105
|
+
if (Date.now() - startTime > this.maxWaitMs) {
|
|
106
|
+
throw new Error(`Replicate prediction timed out after ${this.maxWaitMs}ms`)
|
|
107
|
+
}
|
|
108
|
+
await new Promise(r => setTimeout(r, 2000))
|
|
109
|
+
|
|
110
|
+
const pollResponse = await fetch(prediction.urls?.get ?? `${this.baseUrl}/v1/predictions/${prediction.id}`, {
|
|
111
|
+
headers: { 'Authorization': `Bearer ${this.apiKey}` },
|
|
112
|
+
})
|
|
113
|
+
prediction = await pollResponse.json()
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
if (prediction.status === 'failed') {
|
|
117
|
+
throw new Error(`Replicate prediction failed: ${prediction.error ?? 'Unknown error'}`)
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
// Output is typically a URL or array of URLs
|
|
121
|
+
const output = prediction.output
|
|
122
|
+
if (Array.isArray(output)) return output[0]
|
|
123
|
+
return typeof output === 'string' ? output : JSON.stringify(output)
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// ============================================================================
|
|
128
|
+
// Fal.ai
|
|
129
|
+
// ============================================================================
|
|
130
|
+
|
|
131
|
+
/** Configuration for the Fal.ai queue client. */
export interface FalConfig {
  /** Fal.ai API key, sent as `Authorization: Key <apiKey>`. */
  apiKey: string
  /** Queue origin override; defaults to https://queue.fal.run. */
  baseUrl?: string
  /** Max poll time in ms before the submit-and-poll loop times out. */
  maxWaitMs?: number
}
|
|
136
|
+
|
|
137
|
+
export class FalProvider implements AIProviderInterface {
|
|
138
|
+
type: AIProviderType = 'fal'
|
|
139
|
+
name = 'Fal.ai'
|
|
140
|
+
private apiKey: string
|
|
141
|
+
private baseUrl: string
|
|
142
|
+
private maxWaitMs: number
|
|
143
|
+
|
|
144
|
+
constructor(config: FalConfig) {
|
|
145
|
+
this.apiKey = config.apiKey
|
|
146
|
+
this.baseUrl = (config.baseUrl ?? 'https://queue.fal.run').replace(/\/$/, '')
|
|
147
|
+
this.maxWaitMs = config.maxWaitMs ?? 300_000
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
async generateText(_options: AIGenerateTextOptions): Promise<AIGenerationResult> {
|
|
151
|
+
throw new Error('Use Fal.ai for image/video generation, not text.')
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
async generateImage(options: AIGenerateImageOptions): Promise<string> {
|
|
155
|
+
const model = options.model ?? 'fal-ai/flux/dev'
|
|
156
|
+
return await this.runModel(model, {
|
|
157
|
+
prompt: options.prompt,
|
|
158
|
+
negative_prompt: options.negativePrompt,
|
|
159
|
+
image_size: options.size ?? '1024x1024',
|
|
160
|
+
num_images: options.n ?? 1,
|
|
161
|
+
seed: options.seed,
|
|
162
|
+
})
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
async generateVideo(options: AIGenerateVideoOptions): Promise<string> {
|
|
166
|
+
const model = options.model ?? 'fal-ai/kling-video/v2/master'
|
|
167
|
+
return await this.runModel(model, {
|
|
168
|
+
prompt: options.prompt,
|
|
169
|
+
duration: options.duration ? `${options.duration}` : '5',
|
|
170
|
+
aspect_ratio: options.aspectRatio ?? '16:9',
|
|
171
|
+
image_url: options.startImage,
|
|
172
|
+
})
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
/** Core queue-based submit-and-poll */
|
|
176
|
+
private async runModel(model: string, input: Record<string, unknown>): Promise<string> {
|
|
177
|
+
// Submit to queue
|
|
178
|
+
const submitResponse = await fetch(`${this.baseUrl}/${model}`, {
|
|
179
|
+
method: 'POST',
|
|
180
|
+
headers: {
|
|
181
|
+
'Content-Type': 'application/json',
|
|
182
|
+
'Authorization': `Key ${this.apiKey}`,
|
|
183
|
+
},
|
|
184
|
+
body: JSON.stringify(input),
|
|
185
|
+
})
|
|
186
|
+
|
|
187
|
+
if (!submitResponse.ok) {
|
|
188
|
+
const errorText = await submitResponse.text()
|
|
189
|
+
throw new Error(`Fal.ai error (${submitResponse.status}): ${errorText}`)
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
let result = await submitResponse.json() as any
|
|
193
|
+
|
|
194
|
+
// If queued, poll for result
|
|
195
|
+
if (result.request_id) {
|
|
196
|
+
const statusUrl = `https://queue.fal.run/${model}/requests/${result.request_id}/status`
|
|
197
|
+
const resultUrl = `https://queue.fal.run/${model}/requests/${result.request_id}`
|
|
198
|
+
const startTime = Date.now()
|
|
199
|
+
|
|
200
|
+
while (true) {
|
|
201
|
+
if (Date.now() - startTime > this.maxWaitMs) {
|
|
202
|
+
throw new Error(`Fal.ai timed out after ${this.maxWaitMs}ms`)
|
|
203
|
+
}
|
|
204
|
+
await new Promise(r => setTimeout(r, 2000))
|
|
205
|
+
|
|
206
|
+
const statusResponse = await fetch(statusUrl, {
|
|
207
|
+
headers: { 'Authorization': `Key ${this.apiKey}` },
|
|
208
|
+
})
|
|
209
|
+
const status = await statusResponse.json() as any
|
|
210
|
+
|
|
211
|
+
if (status.status === 'COMPLETED') {
|
|
212
|
+
const resultResponse = await fetch(resultUrl, {
|
|
213
|
+
headers: { 'Authorization': `Key ${this.apiKey}` },
|
|
214
|
+
})
|
|
215
|
+
result = await resultResponse.json()
|
|
216
|
+
break
|
|
217
|
+
}
|
|
218
|
+
if (status.status === 'FAILED') {
|
|
219
|
+
throw new Error(`Fal.ai failed: ${status.error ?? 'Unknown error'}`)
|
|
220
|
+
}
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
// Extract URL from response
|
|
225
|
+
if (result.images) return result.images[0]?.url ?? ''
|
|
226
|
+
if (result.video) return result.video?.url ?? ''
|
|
227
|
+
if (result.audio) return result.audio?.url ?? ''
|
|
228
|
+
return typeof result.output === 'string' ? result.output : JSON.stringify(result)
|
|
229
|
+
}
|
|
230
|
+
}
|