@geenius/adapters 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.changeset/config.json +11 -0
- package/.github/CODEOWNERS +1 -0
- package/.github/ISSUE_TEMPLATE/bug_report.md +16 -0
- package/.github/ISSUE_TEMPLATE/feature_request.md +11 -0
- package/.github/PULL_REQUEST_TEMPLATE.md +10 -0
- package/.github/dependabot.yml +11 -0
- package/.github/workflows/ci.yml +23 -0
- package/.github/workflows/release.yml +29 -0
- package/.nvmrc +1 -0
- package/.project/ACCOUNT.yaml +4 -0
- package/.project/IDEAS.yaml +7 -0
- package/.project/PROJECT.yaml +11 -0
- package/.project/ROADMAP.yaml +15 -0
- package/CHANGELOG.md +11 -0
- package/CODE_OF_CONDUCT.md +16 -0
- package/CONTRIBUTING.md +26 -0
- package/LICENSE +21 -0
- package/README.md +202 -0
- package/SECURITY.md +15 -0
- package/SUPPORT.md +8 -0
- package/package.json +51 -0
- package/packages/convex/README.md +64 -0
- package/packages/convex/package.json +42 -0
- package/packages/convex/src/adapter.ts +39 -0
- package/packages/convex/src/index.ts +19 -0
- package/packages/convex/src/mutations.ts +142 -0
- package/packages/convex/src/queries.ts +106 -0
- package/packages/convex/src/schema.ts +54 -0
- package/packages/convex/src/types.ts +20 -0
- package/packages/convex/tsconfig.json +11 -0
- package/packages/convex/tsup.config.ts +10 -0
- package/packages/react/README.md +1 -0
- package/packages/react/package.json +45 -0
- package/packages/react/src/components/AdapterCard.tsx +49 -0
- package/packages/react/src/components/AdapterConfigForm.tsx +118 -0
- package/packages/react/src/components/AdapterList.tsx +84 -0
- package/packages/react/src/components/AdapterStatusBadge.tsx +30 -0
- package/packages/react/src/components/index.ts +4 -0
- package/packages/react/src/hooks/index.ts +75 -0
- package/packages/react/src/index.tsx +44 -0
- package/packages/react/src/pages/AdapterDetailPage.tsx +133 -0
- package/packages/react/src/pages/AdaptersPage.tsx +111 -0
- package/packages/react/src/pages/index.ts +2 -0
- package/packages/react/src/provider/AdapterProvider.tsx +115 -0
- package/packages/react/src/provider/index.ts +2 -0
- package/packages/react/tsconfig.json +18 -0
- package/packages/react/tsup.config.ts +10 -0
- package/packages/react-css/README.md +1 -0
- package/packages/react-css/package.json +44 -0
- package/packages/react-css/src/adapters.css +1576 -0
- package/packages/react-css/src/components/AdapterCard.tsx +34 -0
- package/packages/react-css/src/components/AdapterConfigForm.tsx +63 -0
- package/packages/react-css/src/components/AdapterList.tsx +40 -0
- package/packages/react-css/src/components/AdapterStatusBadge.tsx +21 -0
- package/packages/react-css/src/components/index.ts +4 -0
- package/packages/react-css/src/hooks/index.ts +75 -0
- package/packages/react-css/src/index.tsx +25 -0
- package/packages/react-css/src/pages/AdapterDetailPage.tsx +133 -0
- package/packages/react-css/src/pages/AdaptersPage.tsx +111 -0
- package/packages/react-css/src/pages/index.ts +2 -0
- package/packages/react-css/src/provider/AdapterProvider.tsx +115 -0
- package/packages/react-css/src/provider/index.ts +2 -0
- package/packages/react-css/src/styles.css +494 -0
- package/packages/react-css/tsconfig.json +19 -0
- package/packages/react-css/tsup.config.ts +2 -0
- package/packages/shared/README.md +1 -0
- package/packages/shared/package.json +39 -0
- package/packages/shared/src/__tests__/adapters.test.ts +545 -0
- package/packages/shared/src/admin/index.ts +2 -0
- package/packages/shared/src/admin/interface.ts +34 -0
- package/packages/shared/src/admin/localStorage.ts +109 -0
- package/packages/shared/src/ai/anthropic.ts +123 -0
- package/packages/shared/src/ai/cloudflare-gateway.ts +130 -0
- package/packages/shared/src/ai/gemini.ts +181 -0
- package/packages/shared/src/ai/index.ts +14 -0
- package/packages/shared/src/ai/interface.ts +11 -0
- package/packages/shared/src/ai/localStorage.ts +78 -0
- package/packages/shared/src/ai/ollama.ts +143 -0
- package/packages/shared/src/ai/openai.ts +120 -0
- package/packages/shared/src/ai/vercel-ai.ts +101 -0
- package/packages/shared/src/auth/better-auth.ts +118 -0
- package/packages/shared/src/auth/clerk.ts +151 -0
- package/packages/shared/src/auth/convex-auth.ts +125 -0
- package/packages/shared/src/auth/index.ts +10 -0
- package/packages/shared/src/auth/interface.ts +17 -0
- package/packages/shared/src/auth/localStorage.ts +125 -0
- package/packages/shared/src/auth/supabase-auth.ts +136 -0
- package/packages/shared/src/config.ts +57 -0
- package/packages/shared/src/constants.ts +122 -0
- package/packages/shared/src/db/convex.ts +146 -0
- package/packages/shared/src/db/index.ts +10 -0
- package/packages/shared/src/db/interface.ts +13 -0
- package/packages/shared/src/db/localStorage.ts +91 -0
- package/packages/shared/src/db/mongodb.ts +125 -0
- package/packages/shared/src/db/neon.ts +171 -0
- package/packages/shared/src/db/supabase.ts +158 -0
- package/packages/shared/src/index.ts +117 -0
- package/packages/shared/src/payments/index.ts +4 -0
- package/packages/shared/src/payments/interface.ts +11 -0
- package/packages/shared/src/payments/localStorage.ts +81 -0
- package/packages/shared/src/payments/stripe.ts +177 -0
- package/packages/shared/src/storage/convex.ts +113 -0
- package/packages/shared/src/storage/index.ts +14 -0
- package/packages/shared/src/storage/interface.ts +11 -0
- package/packages/shared/src/storage/localStorage.ts +95 -0
- package/packages/shared/src/storage/minio.ts +47 -0
- package/packages/shared/src/storage/r2.ts +123 -0
- package/packages/shared/src/storage/s3.ts +128 -0
- package/packages/shared/src/storage/supabase-storage.ts +116 -0
- package/packages/shared/src/storage/uploadthing.ts +126 -0
- package/packages/shared/src/styles/adapters.css +494 -0
- package/packages/shared/src/tier-gate.ts +119 -0
- package/packages/shared/src/types.ts +162 -0
- package/packages/shared/tsconfig.json +18 -0
- package/packages/shared/tsup.config.ts +9 -0
- package/packages/shared/vitest.config.ts +14 -0
- package/packages/solidjs/README.md +1 -0
- package/packages/solidjs/package.json +44 -0
- package/packages/solidjs/src/components/AdapterCard.tsx +24 -0
- package/packages/solidjs/src/components/AdapterConfigForm.tsx +54 -0
- package/packages/solidjs/src/components/AdapterList.tsx +28 -0
- package/packages/solidjs/src/components/AdapterStatusBadge.tsx +20 -0
- package/packages/solidjs/src/components/index.ts +4 -0
- package/packages/solidjs/src/index.tsx +17 -0
- package/packages/solidjs/src/pages/AdapterDetailPage.tsx +38 -0
- package/packages/solidjs/src/pages/AdaptersPage.tsx +39 -0
- package/packages/solidjs/src/pages/index.ts +2 -0
- package/packages/solidjs/src/primitives/index.ts +78 -0
- package/packages/solidjs/src/provider/AdapterProvider.tsx +62 -0
- package/packages/solidjs/src/provider/index.ts +2 -0
- package/packages/solidjs/tsconfig.json +20 -0
- package/packages/solidjs/tsup.config.ts +10 -0
- package/packages/solidjs-css/README.md +1 -0
- package/packages/solidjs-css/package.json +43 -0
- package/packages/solidjs-css/src/adapters.css +1576 -0
- package/packages/solidjs-css/src/components/AdapterCard.tsx +43 -0
- package/packages/solidjs-css/src/components/AdapterConfigForm.tsx +119 -0
- package/packages/solidjs-css/src/components/AdapterList.tsx +68 -0
- package/packages/solidjs-css/src/components/AdapterStatusBadge.tsx +24 -0
- package/packages/solidjs-css/src/components/index.ts +8 -0
- package/packages/solidjs-css/src/index.tsx +30 -0
- package/packages/solidjs-css/src/pages/AdapterDetailPage.tsx +107 -0
- package/packages/solidjs-css/src/pages/AdaptersPage.tsx +94 -0
- package/packages/solidjs-css/src/pages/index.ts +4 -0
- package/packages/solidjs-css/src/primitives/index.ts +1 -0
- package/packages/solidjs-css/src/provider/AdapterProvider.tsx +61 -0
- package/packages/solidjs-css/src/provider/index.ts +2 -0
- package/packages/solidjs-css/tsconfig.json +20 -0
- package/packages/solidjs-css/tsup.config.ts +2 -0
- package/pnpm-workspace.yaml +2 -0
- package/tsconfig.json +17 -0
package/packages/shared/src/ai/anthropic.ts
@@ -0,0 +1,123 @@
```ts
// @geenius/adapters — Anthropic implementation (MVP tier)
// Wraps the Anthropic API to conform to AiAdapter interface.
// Requires: @anthropic-ai/sdk

import type { ChatMessage, ChatResponse, AiOptions } from '../types'
import type { AiAdapter } from './interface'

interface AnthropicStreamEvent {
  type: string
  delta?: { type: string; text?: string }
}

interface AnthropicClient {
  messages: {
    create(params: {
      model: string
      max_tokens: number
      system?: string
      messages: Array<{ role: 'user' | 'assistant'; content: string }>
      temperature?: number
      stream?: false
    }): Promise<{
      content: Array<{ type: string; text?: string }>
      stop_reason: string | null
      usage: { input_tokens: number; output_tokens: number }
    }>
    create(params: {
      model: string
      max_tokens: number
      system?: string
      messages: Array<{ role: 'user' | 'assistant'; content: string }>
      temperature?: number
      stream: true
    }): Promise<AsyncIterable<AnthropicStreamEvent>>
  }
}

export interface AnthropicAdapterOptions {
  /** Pre-configured Anthropic client instance */
  client: AnthropicClient
  /** Default model (default: claude-sonnet-4-20250514) */
  defaultModel?: string
}

export function createAnthropicAdapter(options: AnthropicAdapterOptions): AiAdapter {
  const {
    client,
    defaultModel = 'claude-sonnet-4-20250514',
  } = options

  return {
    async chat(messages: ChatMessage[], opts?: AiOptions): Promise<ChatResponse> {
      // Anthropic handles system messages separately
      const systemMsg = messages.find((m) => m.role === 'system')
      const nonSystemMsgs = messages
        .filter((m) => m.role !== 'system')
        .map((m) => ({
          role: m.role as 'user' | 'assistant',
          content: m.content,
        }))

      const response = await client.messages.create({
        model: opts?.model || defaultModel,
        max_tokens: opts?.maxTokens || 4096,
        system: systemMsg?.content,
        messages: nonSystemMsgs,
        temperature: opts?.temperature,
      })

      const textBlock = response.content.find((c) => c.type === 'text')
      return {
        content: textBlock?.text || '',
        finishReason: (response.stop_reason === 'end_turn' ? 'stop'
          : response.stop_reason === 'max_tokens' ? 'length'
          : 'error') as ChatResponse['finishReason'],
        usage: {
          promptTokens: response.usage.input_tokens,
          completionTokens: response.usage.output_tokens,
          totalTokens: response.usage.input_tokens + response.usage.output_tokens,
        },
      }
    },

    async complete(prompt: string, opts?: AiOptions): Promise<string> {
      const response = await this.chat(
        [{ role: 'user', content: prompt }],
        opts,
      )
      return response.content
    },

    async embed(_text: string | string[]): Promise<number[][]> {
      // Anthropic doesn't offer an embeddings API
      // Users should use OpenAI or a dedicated embedding service
      throw new Error(
        'Anthropic does not provide an embeddings API. ' +
        'Use createOpenAiAdapter for embeddings, or a dedicated embedding service.'
      )
    },

    async *stream(messages: ChatMessage[], opts?: AiOptions): AsyncIterable<string> {
      const systemMsg = messages.find((m) => m.role === 'system')
      const nonSystemMsgs = messages
        .filter((m) => m.role !== 'system')
        .map((m) => ({ role: m.role as 'user' | 'assistant', content: m.content }))

      const iterable = await client.messages.create({
        model: opts?.model || defaultModel,
        max_tokens: opts?.maxTokens || 4096,
        system: systemMsg?.content,
        messages: nonSystemMsgs,
        temperature: opts?.temperature,
        stream: true,
      })

      for await (const event of iterable) {
        if (event.type === 'content_block_delta' && event.delta?.type === 'text_delta' && event.delta.text) {
          yield event.delta.text
        }
      }
    },
  }
}
```
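For orientation, a minimal wiring sketch for the adapter above. It assumes the default export of `@anthropic-ai/sdk` (whose client is structurally compatible with the `AnthropicClient` interface declared here) and an illustrative `@geenius/adapters` import path; the env-var name is a placeholder as well.

```ts
// Sketch only: import path and env-var name are assumptions, not package docs.
import Anthropic from '@anthropic-ai/sdk'
import { createAnthropicAdapter } from '@geenius/adapters'

const ai = createAnthropicAdapter({
  client: new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
})

const reply = await ai.chat(
  [
    { role: 'system', content: 'You are a concise assistant.' },
    { role: 'user', content: 'Explain the adapter pattern in one sentence.' },
  ],
  { maxTokens: 256 },
)
console.log(reply.content, reply.usage?.totalTokens)
```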
package/packages/shared/src/ai/cloudflare-gateway.ts
@@ -0,0 +1,130 @@
```ts
// @geenius/adapters — Cloudflare AI Gateway adapter
// Proxies requests through Cloudflare AI Gateway for caching, rate limiting,
// analytics, and fallback routing — while using any underlying AI model.
// Uses the OpenAI-compatible gateway endpoint.

import type { ChatMessage, ChatResponse, AiOptions } from '../types'
import type { AiAdapter } from './interface'

export interface CloudflareAiGatewayOptions {
  /** Cloudflare account ID */
  accountId: string
  /** AI Gateway name/ID */
  gatewayId: string
  /** The upstream provider to route through (e.g. 'openai', 'anthropic', 'google-ai-studio', 'workers-ai') */
  provider: 'openai' | 'anthropic' | 'google-ai-studio' | 'workers-ai' | 'azure-openai' | 'groq' | 'cohere' | string
  /** API key for the upstream provider */
  apiKey: string
  /** Default model */
  defaultModel?: string
  /** Embedding model */
  embeddingModel?: string
  /** Custom gateway base URL (default: Cloudflare's) */
  baseUrl?: string
  /** Gateway cache TTL in seconds (caching disabled when unset) */
  cacheTtl?: number
  /** Number of retries on failure (default: 0) */
  retries?: number
}

export function createCloudflareAiGatewayAdapter(options: CloudflareAiGatewayOptions): AiAdapter {
  const {
    accountId,
    gatewayId,
    provider,
    apiKey,
    defaultModel = 'gpt-4o-mini',
    embeddingModel = 'text-embedding-3-small',
    baseUrl,
    cacheTtl,
    retries,
  } = options

  const gatewayUrl = baseUrl || `https://gateway.ai.cloudflare.com/v1/${accountId}/${gatewayId}/${provider}`

  function getHeaders(): Record<string, string> {
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
    }

    // Set auth header based on provider
    if (provider === 'anthropic') {
      headers['x-api-key'] = apiKey
      headers['anthropic-version'] = '2023-06-01'
    } else {
      headers['Authorization'] = `Bearer ${apiKey}`
    }

    // Cloudflare gateway-specific headers
    if (cacheTtl) headers['cf-aig-cache-ttl'] = String(cacheTtl)
    // `retries` has no per-request header mapping here; retry/fallback behavior
    // is configured on the gateway itself in the Cloudflare dashboard.
    void retries

    return headers
  }

  return {
    async chat(messages: ChatMessage[], opts?: AiOptions): Promise<ChatResponse> {
      const model = opts?.model || defaultModel

      // Use OpenAI-compatible format (supported by gateway for most providers)
      const body = {
        model,
        messages: messages.map((m) => ({ role: m.role, content: m.content })),
        temperature: opts?.temperature,
        max_tokens: opts?.maxTokens,
      }

      const response = await fetch(`${gatewayUrl}/chat/completions`, {
        method: 'POST',
        headers: getHeaders(),
        body: JSON.stringify(body),
      })

      if (!response.ok) {
        const error = await response.text()
        throw new Error(`Cloudflare AI Gateway error: ${response.status} — ${error}`)
      }

      const data = await response.json()
      const choice = data.choices?.[0]

      return {
        content: choice?.message?.content || '',
        finishReason: (choice?.finish_reason === 'stop' ? 'stop'
          : choice?.finish_reason === 'length' ? 'length'
          : 'error') as ChatResponse['finishReason'],
        usage: data.usage ? {
          promptTokens: data.usage.prompt_tokens || 0,
          completionTokens: data.usage.completion_tokens || 0,
          totalTokens: data.usage.total_tokens || 0,
        } : undefined,
      }
    },

    async complete(prompt: string, opts?: AiOptions): Promise<string> {
      const response = await this.chat(
        [{ role: 'user', content: prompt }],
        opts,
      )
      return response.content
    },

    async embed(text: string | string[]): Promise<number[][]> {
      const response = await fetch(`${gatewayUrl}/embeddings`, {
        method: 'POST',
        headers: getHeaders(),
        body: JSON.stringify({
          model: embeddingModel,
          input: text,
        }),
      })

      if (!response.ok) {
        throw new Error(`Cloudflare AI Gateway embedding error: ${response.status}`)
      }

      const data = await response.json()
      return data.data?.map((d: any) => d.embedding) || []
    },
  }
}
```
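A usage sketch for the gateway adapter; the account ID, gateway ID, and env-var name are placeholders, and the import path follows the same assumption as above.

```ts
import { createCloudflareAiGatewayAdapter } from '@geenius/adapters'

// Placeholder IDs: in practice these come from your Cloudflare dashboard.
const ai = createCloudflareAiGatewayAdapter({
  accountId: 'your-account-id',
  gatewayId: 'my-gateway',
  provider: 'openai',
  apiKey: process.env.OPENAI_API_KEY ?? '',
  cacheTtl: 3600, // let the gateway cache identical requests for an hour
})

const answer = await ai.complete('Name one benefit of routing AI calls through a gateway.')
console.log(answer)
```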
package/packages/shared/src/ai/gemini.ts
@@ -0,0 +1,181 @@
```ts
// @geenius/adapters — Google Gemini native AI adapter
// Uses the Gemini REST API directly (no SDK dependency required).
// Supports chat, completion, and embeddings.

import type { ChatMessage, ChatResponse, AiOptions } from '../types'
import type { AiAdapter } from './interface'

export interface GeminiAdapterOptions {
  /** Google AI API key */
  apiKey: string
  /** Default model (default: gemini-2.0-flash) */
  defaultModel?: string
  /** Embedding model (default: text-embedding-004) */
  embeddingModel?: string
  /** Base URL (default: https://generativelanguage.googleapis.com/v1beta) */
  baseUrl?: string
}

export function createGeminiAdapter(options: GeminiAdapterOptions): AiAdapter {
  const {
    apiKey,
    defaultModel = 'gemini-2.0-flash',
    embeddingModel = 'text-embedding-004',
    baseUrl = 'https://generativelanguage.googleapis.com/v1beta',
  } = options

  function mapRole(role: string): string {
    if (role === 'assistant') return 'model'
    if (role === 'system') return 'user' // Gemini handles system via systemInstruction
    return 'user'
  }

  return {
    async chat(messages: ChatMessage[], opts?: AiOptions): Promise<ChatResponse> {
      const model = opts?.model || defaultModel

      // Extract system message for Gemini's systemInstruction
      const systemMsg = messages.find((m) => m.role === 'system')
      const nonSystemMsgs = messages.filter((m) => m.role !== 'system')

      const body: any = {
        contents: nonSystemMsgs.map((m) => ({
          role: mapRole(m.role),
          parts: [{ text: m.content }],
        })),
        generationConfig: {
          temperature: opts?.temperature,
          maxOutputTokens: opts?.maxTokens,
        },
      }

      if (systemMsg) {
        body.systemInstruction = { parts: [{ text: systemMsg.content }] }
      }

      const response = await fetch(
        `${baseUrl}/models/${model}:generateContent?key=${apiKey}`,
        {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify(body),
        },
      )

      if (!response.ok) {
        const error = await response.text()
        throw new Error(`Gemini API error: ${response.status} — ${error}`)
      }

      const data = await response.json()
      const candidate = data.candidates?.[0]
      const text = candidate?.content?.parts?.[0]?.text || ''
      const finishReason = candidate?.finishReason === 'STOP' ? 'stop'
        : candidate?.finishReason === 'MAX_TOKENS' ? 'length'
        : 'error'

      return {
        content: text,
        finishReason: finishReason as ChatResponse['finishReason'],
        usage: data.usageMetadata ? {
          promptTokens: data.usageMetadata.promptTokenCount || 0,
          completionTokens: data.usageMetadata.candidatesTokenCount || 0,
          totalTokens: data.usageMetadata.totalTokenCount || 0,
        } : undefined,
      }
    },

    async complete(prompt: string, opts?: AiOptions): Promise<string> {
      const response = await this.chat(
        [{ role: 'user', content: prompt }],
        opts,
      )
      return response.content
    },

    async embed(text: string | string[]): Promise<number[][]> {
      const inputs = Array.isArray(text) ? text : [text]
      const results: number[][] = []

      for (const input of inputs) {
        const response = await fetch(
          `${baseUrl}/models/${embeddingModel}:embedContent?key=${apiKey}`,
          {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
              model: `models/${embeddingModel}`,
              content: { parts: [{ text: input }] },
            }),
          },
        )

        if (!response.ok) {
          throw new Error(`Gemini embedding error: ${response.status}`)
        }

        const data = await response.json()
        results.push(data.embedding?.values || [])
      }

      return results
    },

    async *stream(messages: ChatMessage[], opts?: AiOptions): AsyncIterable<string> {
      const model = opts?.model || defaultModel
      const systemMsg = messages.find((m) => m.role === 'system')
      const nonSystemMsgs = messages.filter((m) => m.role !== 'system')

      const body: any = {
        contents: nonSystemMsgs.map((m) => ({
          role: mapRole(m.role),
          parts: [{ text: m.content }],
        })),
        generationConfig: {
          temperature: opts?.temperature,
          maxOutputTokens: opts?.maxTokens,
        },
      }

      if (systemMsg) {
        body.systemInstruction = { parts: [{ text: systemMsg.content }] }
      }

      const response = await fetch(
        `${baseUrl}/models/${model}:streamGenerateContent?key=${apiKey}&alt=sse`,
        {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify(body),
        },
      )

      if (!response.ok || !response.body) {
        const error = await response.text()
        throw new Error(`Gemini stream error: ${response.status} — ${error}`)
      }

      const reader = response.body.getReader()
      const decoder = new TextDecoder()
      let buffer = ''

      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        const lines = buffer.split('\n')
        buffer = lines.pop() ?? ''
        for (const line of lines) {
          if (!line.startsWith('data: ')) continue
          const json = line.slice(6).trim()
          if (!json || json === '[DONE]') continue
          try {
            const chunk = JSON.parse(json)
            const text = chunk.candidates?.[0]?.content?.parts?.[0]?.text
            if (text) yield text
          } catch { /* skip malformed chunks */ }
        }
      }
    },
  }
}
```
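Since this adapter implements the optional `stream` method, callers can render deltas as they arrive. A small consumption sketch (import path and env-var name assumed, as before):

```ts
import { createGeminiAdapter } from '@geenius/adapters'

const ai = createGeminiAdapter({ apiKey: process.env.GEMINI_API_KEY ?? '' })

// stream is defined on this adapter, so the non-null assertion is safe here.
for await (const delta of ai.stream!([{ role: 'user', content: 'Write a haiku about adapters.' }])) {
  process.stdout.write(delta)
}
```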
package/packages/shared/src/ai/index.ts
@@ -0,0 +1,14 @@
```ts
export type { AiAdapter } from './interface'
export { createLocalStorageAiAdapter } from './localStorage'
export { createOpenAiAdapter } from './openai'
export type { OpenAiAdapterOptions } from './openai'
export { createAnthropicAdapter } from './anthropic'
export type { AnthropicAdapterOptions } from './anthropic'
export { createGeminiAdapter } from './gemini'
export type { GeminiAdapterOptions } from './gemini'
export { createOllamaAdapter } from './ollama'
export type { OllamaAdapterOptions } from './ollama'
export { createCloudflareAiGatewayAdapter } from './cloudflare-gateway'
export type { CloudflareAiGatewayOptions } from './cloudflare-gateway'
export { createVercelAiAdapter } from './vercel-ai'
export type { VercelAiAdapterOptions } from './vercel-ai'
```
package/packages/shared/src/ai/interface.ts
@@ -0,0 +1,11 @@
```ts
// @geenius/adapters — AI adapter interface

import type { ChatMessage, ChatResponse, AiOptions } from '../types'

export interface AiAdapter {
  chat(messages: ChatMessage[], options?: AiOptions): Promise<ChatResponse>
  complete(prompt: string, options?: AiOptions): Promise<string>
  embed(text: string | string[]): Promise<number[][]>
  /** Stream chat tokens as an async iterable of text deltas. Optional — falls back to chat() if not implemented. */
  stream?(messages: ChatMessage[], options?: AiOptions): AsyncIterable<string>
}
```
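Because `stream` is optional, callers need the chat() fallback the doc comment describes. An illustrative helper, not part of the package, assuming the types are re-exported from the package root:

```ts
import type { AiAdapter, ChatMessage, AiOptions } from '@geenius/adapters'

// Illustrative: prefer real streaming, otherwise emit the full chat() reply once.
export async function* streamOrChat(
  adapter: AiAdapter,
  messages: ChatMessage[],
  options?: AiOptions,
): AsyncIterable<string> {
  if (adapter.stream) {
    yield* adapter.stream(messages, options)
  } else {
    const { content } = await adapter.chat(messages, options)
    yield content
  }
}
```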
package/packages/shared/src/ai/localStorage.ts
@@ -0,0 +1,78 @@
```ts
// @geenius/adapters — localStorage AI implementation (canned responses)

import type { ChatMessage, ChatResponse, AiOptions } from '../types'
import type { AiAdapter } from './interface'

const HISTORY_KEY = 'geenius_ai_history'

const CANNED_RESPONSES = [
  "That's an interesting question! In the Pronto tier, AI responses are simulated. Upgrade to MVP for real AI.",
  "I'm a mock AI assistant. I can demonstrate the chat interface, but for real responses you'll need the MVP tier.",
  "Great question! This is a localStorage-based AI mock. The real AI adapter connects to OpenAI/Anthropic APIs.",
  "I appreciate your input! This demo shows how the AI interface works. Upgrade for actual AI capabilities.",
  "Interesting! While I can't provide real AI analysis in Pronto, this shows the adapter pattern in action.",
]

function getCannedResponse(messages: ChatMessage[]): string {
  const lastMsg = messages[messages.length - 1]?.content || ''
  // Deterministic but varied: hash the message to pick a response
  let hash = 0
  for (let i = 0; i < lastMsg.length; i++) { hash = ((hash << 5) - hash) + lastMsg.charCodeAt(i); hash |= 0 }
  return CANNED_RESPONSES[Math.abs(hash) % CANNED_RESPONSES.length]
}

function saveToHistory(messages: ChatMessage[], response: string) {
  try {
    const history = JSON.parse(localStorage.getItem(HISTORY_KEY) || '[]')
    history.push({ timestamp: new Date().toISOString(), messages, response })
    // Keep last 50 conversations
    if (history.length > 50) history.splice(0, history.length - 50)
    localStorage.setItem(HISTORY_KEY, JSON.stringify(history))
  } catch { /* ignore storage errors */ }
}

export function createLocalStorageAiAdapter(): AiAdapter {
  return {
    async chat(messages: ChatMessage[], _options?: AiOptions): Promise<ChatResponse> {
      // Simulate network delay
      await new Promise(r => setTimeout(r, 300 + Math.random() * 700))
      const content = getCannedResponse(messages)
      saveToHistory(messages, content)
      return {
        content,
        finishReason: 'stop',
        usage: { promptTokens: 10, completionTokens: 20, totalTokens: 30 },
      }
    },

    async complete(prompt: string, options?: AiOptions): Promise<string> {
      const response = await this.chat([{ role: 'user', content: prompt }], options)
      return response.content
    },

    async embed(text: string | string[]): Promise<number[][]> {
      const inputs = Array.isArray(text) ? text : [text]
      // Return mock 8-dimensional embeddings (deterministic from text)
      return inputs.map(t => {
        const vec: number[] = []
        for (let i = 0; i < 8; i++) {
          let h = i * 31
          for (let j = 0; j < t.length; j++) { h = ((h << 5) - h) + t.charCodeAt(j); h |= 0 }
          vec.push(Math.sin(h) * 0.5 + 0.5)
        }
        return vec
      })
    },

    async *stream(messages: ChatMessage[], _options?: AiOptions): AsyncIterable<string> {
      // Simulate streaming by yielding the canned response word-by-word
      const content = getCannedResponse(messages)
      saveToHistory(messages, content)
      const words = content.split(' ')
      for (const word of words) {
        await new Promise(r => setTimeout(r, 40 + Math.random() * 60))
        yield word + ' '
      }
    },
  }
}
```
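This mock lets UI code exercise the streaming path offline; for example:

```ts
import { createLocalStorageAiAdapter } from '@geenius/adapters'

const mockAi = createLocalStorageAiAdapter()

// Words arrive with a simulated delay, mimicking a real token stream.
for await (const word of mockAi.stream!([{ role: 'user', content: 'Hello?' }])) {
  process.stdout.write(word)
}
```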
package/packages/shared/src/ai/ollama.ts
@@ -0,0 +1,143 @@
```ts
// @geenius/adapters — Ollama AI adapter (local LLMs)
// Uses Ollama's REST API (no SDK dependency required).
// Supports chat, completion, and embeddings — all running locally.

import type { ChatMessage, ChatResponse, AiOptions } from '../types'
import type { AiAdapter } from './interface'

export interface OllamaAdapterOptions {
  /** Ollama server URL (default: http://localhost:11434) */
  host?: string
  /** Default model (default: llama3.2) */
  defaultModel?: string
  /** Embedding model (default: nomic-embed-text) */
  embeddingModel?: string
}

export function createOllamaAdapter(options: OllamaAdapterOptions = {}): AiAdapter {
  const {
    host = 'http://localhost:11434',
    defaultModel = 'llama3.2',
    embeddingModel = 'nomic-embed-text',
  } = options

  return {
    async chat(messages: ChatMessage[], opts?: AiOptions): Promise<ChatResponse> {
      const response = await fetch(`${host}/api/chat`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          model: opts?.model || defaultModel,
          messages: messages.map((m) => ({ role: m.role, content: m.content })),
          stream: false,
          options: {
            temperature: opts?.temperature,
            num_predict: opts?.maxTokens,
          },
        }),
      })

      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status} — ${response.statusText}`)
      }

      const data = await response.json()

      return {
        content: data.message?.content || '',
        finishReason: data.done ? 'stop' : 'length',
        usage: {
          promptTokens: data.prompt_eval_count || 0,
          completionTokens: data.eval_count || 0,
          totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
        },
      }
    },

    async complete(prompt: string, opts?: AiOptions): Promise<string> {
      // Use the generate endpoint for simple completions
      const response = await fetch(`${host}/api/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          model: opts?.model || defaultModel,
          prompt,
          stream: false,
          options: {
            temperature: opts?.temperature,
            num_predict: opts?.maxTokens,
          },
        }),
      })

      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status}`)
      }

      const data = await response.json()
      return data.response || ''
    },

    async embed(text: string | string[]): Promise<number[][]> {
      const inputs = Array.isArray(text) ? text : [text]
      const results: number[][] = []

      for (const input of inputs) {
        const response = await fetch(`${host}/api/embed`, {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({
            model: embeddingModel,
            input,
          }),
        })

        if (!response.ok) {
          throw new Error(`Ollama embedding error: ${response.status}`)
        }

        const data = await response.json()
        results.push(data.embeddings?.[0] || [])
      }

      return results
    },

    async *stream(messages: ChatMessage[], opts?: AiOptions): AsyncIterable<string> {
      const response = await fetch(`${host}/api/chat`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          model: opts?.model || defaultModel,
          messages: messages.map((m) => ({ role: m.role, content: m.content })),
          stream: true,
          options: {
            temperature: opts?.temperature,
            num_predict: opts?.maxTokens,
          },
        }),
      })

      if (!response.ok || !response.body) {
        throw new Error(`Ollama stream error: ${response.status} — ${response.statusText}`)
      }

      const reader = response.body.getReader()
      const decoder = new TextDecoder()
      // Buffer across reads: an NDJSON line can be split between chunks, so keep
      // the trailing partial line until the next read completes it.
      let buffer = ''

      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        const lines = buffer.split('\n')
        buffer = lines.pop() ?? ''
        for (const line of lines) {
          if (!line.trim()) continue
          try {
            const chunk = JSON.parse(line)
            const delta = chunk.message?.content
            if (delta) yield delta
            if (chunk.done) return
          } catch { /* skip malformed lines */ }
        }
      }
    },
  }
}
```
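Finally, a local-first sketch for the Ollama adapter, assuming a running Ollama server with the default `llama3.2` and `nomic-embed-text` models pulled (import path assumed, as above):

```ts
import { createOllamaAdapter } from '@geenius/adapters'

const ai = createOllamaAdapter() // defaults to http://localhost:11434

const reply = await ai.chat([{ role: 'user', content: 'Why run models locally?' }])
const [vector] = await ai.embed('adapter pattern')
console.log(reply.content)
console.log(`embedding dims: ${vector.length}`)
```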