ailib-router 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +60 -0
- package/dist/client.d.ts +9 -0
- package/dist/client.js +29 -0
- package/dist/config.d.ts +2 -0
- package/dist/config.js +85 -0
- package/dist/errors.d.ts +33 -0
- package/dist/errors.js +111 -0
- package/dist/http.d.ts +9 -0
- package/dist/http.js +91 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +4 -0
- package/dist/providers/claude.d.ts +13 -0
- package/dist/providers/claude.js +56 -0
- package/dist/providers/gemini.d.ts +13 -0
- package/dist/providers/gemini.js +52 -0
- package/dist/providers/ollama.d.ts +13 -0
- package/dist/providers/ollama.js +56 -0
- package/dist/providers/openai.d.ts +13 -0
- package/dist/providers/openai.js +54 -0
- package/dist/router.d.ts +10 -0
- package/dist/router.js +145 -0
- package/dist/types.d.ts +41 -0
- package/dist/types.js +1 -0
- package/package.json +44 -0
package/README.md
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
# AI Multi Provider Library
|
|
2
|
+
|
|
3
|
+
A TypeScript library for Node.js that sends a single message and returns a single response through any of several AI providers, with automatic fallback between them.
|
|
4
|
+
|
|
5
|
+
## Supported providers (v1)
|
|
6
|
+
|
|
7
|
+
- OpenAI (ChatGPT)
|
|
8
|
+
- Google Gemini
|
|
9
|
+
- Anthropic Claude
|
|
10
|
+
- Ollama Cloud
|
|
11
|
+
- Ollama Local
|
|
12
|
+
|
|
13
|
+
## Features
|
|
14
|
+
|
|
15
|
+
- AUTO mode with configurable provider order and fallback
|
|
16
|
+
- MANUAL mode to choose provider and model
|
|
17
|
+
- In MANUAL mode, if selected model is rate-limited, it retries with other models in the same provider
|
|
18
|
+
- In AUTO mode, if provider/model reaches rate limit, it moves to next available provider/model
|
|
19
|
+
- Normalized error handling across all providers
|
|
20
|
+
|
|
21
|
+
## Install
|
|
22
|
+
|
|
23
|
+
npm install
|
|
24
|
+
|
|
25
|
+
## Build
|
|
26
|
+
|
|
27
|
+
npm run build
|
|
28
|
+
|
|
29
|
+
## Test
|
|
30
|
+
|
|
31
|
+
npm test
|
|
32
|
+
|
|
33
|
+
## Quick use
|
|
34
|
+
|
|
35
|
+
import { createAIClient } from "ailib-router";
|
|
36
|
+
|
|
37
|
+
const client = createAIClient();
|
|
38
|
+
const result = await client.sendMessage({
|
|
39
|
+
mode: "auto",
|
|
40
|
+
message: "Explain clean architecture in 5 lines"
|
|
41
|
+
});
|
|
42
|
+
|
|
43
|
+
console.log(result.provider, result.model, result.content);
|
|
44
|
+
|
|
45
|
+
## Manual mode
|
|
46
|
+
|
|
47
|
+
const result = await client.sendMessage({
|
|
48
|
+
mode: "manual",
|
|
49
|
+
provider: "gemini",
|
|
50
|
+
model: "gemini-2.0-flash",
|
|
51
|
+
message: "Write a short poem"
|
|
52
|
+
});
|
|
53
|
+
|
|
54
|
+
## NestJS example
|
|
55
|
+
|
|
56
|
+
Use AIClient inside a service provider and call sendMessage in your service methods.
|
|
57
|
+
|
|
58
|
+
## Environment
|
|
59
|
+
|
|
60
|
+
Copy values from .env.example into your real .env file and set valid keys and model names.
|
package/dist/client.d.ts
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import type { AIMessageRequest, AIMessageResponse, LibraryConfig, ProviderId } from "./types.js";
|
|
2
|
+
export declare class AIClient {
|
|
3
|
+
private readonly config;
|
|
4
|
+
private readonly router;
|
|
5
|
+
constructor(config?: LibraryConfig);
|
|
6
|
+
sendMessage(request: AIMessageRequest): Promise<AIMessageResponse>;
|
|
7
|
+
providers(): ProviderId[];
|
|
8
|
+
}
|
|
9
|
+
export declare function createAIClient(config?: LibraryConfig): AIClient;
|
package/dist/client.js
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import { AIRouter } from "./router.js";
|
|
2
|
+
import { ClaudeAdapter } from "./providers/claude.js";
|
|
3
|
+
import { GeminiAdapter } from "./providers/gemini.js";
|
|
4
|
+
import { OllamaAdapter } from "./providers/ollama.js";
|
|
5
|
+
import { OpenAIAdapter } from "./providers/openai.js";
|
|
6
|
+
import { loadConfig } from "./config.js";
|
|
7
|
+
/**
 * Public entry point of the library: owns the configuration and a router
 * wired with one adapter per supported provider.
 */
export class AIClient {
    config;
    router;
    /**
     * Build a client. When no config object is passed, configuration is
     * loaded from environment variables via loadConfig().
     */
    constructor(config = loadConfig()) {
        this.config = config;
        const { providers } = this.config;
        const adapters = {
            openai: new OpenAIAdapter(providers.openai),
            gemini: new GeminiAdapter(providers.gemini),
            claude: new ClaudeAdapter(providers.claude),
            ollamaCloud: new OllamaAdapter("ollamaCloud", providers.ollamaCloud),
            ollamaLocal: new OllamaAdapter("ollamaLocal", providers.ollamaLocal),
        };
        this.router = new AIRouter(this.config, adapters);
    }
    /** Route a single message through the configured providers. */
    async sendMessage(request) {
        return this.router.sendMessage(request);
    }
    /** Provider ids in the order auto mode will try them. */
    providers() {
        return this.config.providerOrder;
    }
}
/** Convenience factory; equivalent to `new AIClient(config)`. */
export function createAIClient(config) {
    return new AIClient(config);
}
|
package/dist/config.d.ts
ADDED
package/dist/config.js
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import { AIError } from "./errors.js";
|
|
2
|
+
// Fallback provider order, also the set of valid provider ids.
const DEFAULT_PROVIDER_ORDER = [
    "gemini",
    "openai",
    "claude",
    "ollamaCloud",
    "ollamaLocal",
];
/**
 * Build the library configuration from environment variables.
 *
 * @param env - environment map (defaults to process.env; injectable for tests)
 * @returns { timeoutMs, providerOrder, providers } ready for AIRouter
 * @throws AIError (code "configuration") on a non-positive timeout or an
 *         unknown provider id in AI_PROVIDER_ORDER.
 */
export function loadConfig(env = process.env) {
    // Treat an unset or blank AI_REQUEST_TIMEOUT_MS as the 30s default;
    // Number("") would otherwise evaluate to 0 and reject a harmless config.
    const rawTimeout = env.AI_REQUEST_TIMEOUT_MS?.trim();
    const timeoutMs = Number(rawTimeout || "30000");
    if (!Number.isFinite(timeoutMs) || timeoutMs <= 0) {
        throw new AIError({
            message: "AI_REQUEST_TIMEOUT_MS must be a positive number",
            code: "configuration",
        });
    }
    const providerOrder = parseProviderOrder(env.AI_PROVIDER_ORDER);
    const providers = {
        openai: {
            // A provider counts as enabled only when its credential is present.
            enabled: Boolean(env.OPENAI_API_KEY),
            apiKey: env.OPENAI_API_KEY,
            baseUrl: env.OPENAI_BASE_URL ?? "https://api.openai.com",
            models: parseList(env.OPENAI_MODELS, ["gpt-4.1-mini", "gpt-4o-mini"]),
        },
        gemini: {
            enabled: Boolean(env.GEMINI_API_KEY),
            apiKey: env.GEMINI_API_KEY,
            baseUrl: env.GEMINI_BASE_URL ?? "https://generativelanguage.googleapis.com",
            models: parseList(env.GEMINI_MODELS, [
                "gemini-2.0-flash",
                "gemini-1.5-flash",
            ]),
        },
        claude: {
            enabled: Boolean(env.ANTHROPIC_API_KEY),
            apiKey: env.ANTHROPIC_API_KEY,
            baseUrl: env.ANTHROPIC_BASE_URL ?? "https://api.anthropic.com",
            models: parseList(env.ANTHROPIC_MODELS, [
                "claude-3-5-sonnet-latest",
                "claude-3-5-haiku-latest",
            ]),
        },
        ollamaCloud: {
            // Cloud Ollama is keyed off the base URL; the API key is optional.
            enabled: Boolean(env.OLLAMA_CLOUD_BASE_URL),
            apiKey: env.OLLAMA_CLOUD_API_KEY,
            baseUrl: env.OLLAMA_CLOUD_BASE_URL ?? "",
            models: parseList(env.OLLAMA_CLOUD_MODELS, []),
        },
        ollamaLocal: {
            enabled: Boolean(env.OLLAMA_LOCAL_BASE_URL),
            baseUrl: env.OLLAMA_LOCAL_BASE_URL ?? "http://localhost:11434",
            models: parseList(env.OLLAMA_LOCAL_MODELS, ["llama3.2"]),
        },
    };
    return {
        timeoutMs,
        providerOrder,
        providers,
    };
}
/**
 * Parse AI_PROVIDER_ORDER into a validated, de-duplicated provider list.
 * Falls back to DEFAULT_PROVIDER_ORDER when unset.
 * @throws AIError (code "configuration") for unknown provider ids.
 */
function parseProviderOrder(raw) {
    if (!raw) {
        return DEFAULT_PROVIDER_ORDER;
    }
    const mapped = parseList(raw, []).map((value) => value);
    const seen = new Set();
    const order = [];
    for (const provider of mapped) {
        if (!DEFAULT_PROVIDER_ORDER.includes(provider)) {
            throw new AIError({
                message: `Unknown provider in AI_PROVIDER_ORDER: ${provider}`,
                code: "configuration",
            });
        }
        // Drop duplicates so auto mode never retries the same provider twice.
        if (!seen.has(provider)) {
            seen.add(provider);
            order.push(provider);
        }
    }
    return order;
}
|
|
76
|
+
/**
 * Split a comma-separated string into trimmed, non-empty entries.
 * Returns `fallback` when the input is missing or yields no entries.
 */
function parseList(raw, fallback) {
    if (!raw) {
        return fallback;
    }
    const entries = [];
    for (const piece of raw.split(",")) {
        const trimmed = piece.trim();
        if (trimmed) {
            entries.push(trimmed);
        }
    }
    return entries.length ? entries : fallback;
}
|
package/dist/errors.d.ts
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import type { AttemptRecord, ProviderId } from "./types.js";
|
|
2
|
+
export type AIErrorCode = "rate_limit" | "auth" | "timeout" | "invalid_request" | "provider_unavailable" | "network" | "configuration" | "all_models_exhausted" | "all_providers_exhausted" | "unknown";
|
|
3
|
+
export declare class AIError extends Error {
|
|
4
|
+
readonly code: AIErrorCode;
|
|
5
|
+
readonly provider?: ProviderId;
|
|
6
|
+
readonly model?: string;
|
|
7
|
+
readonly status?: number;
|
|
8
|
+
readonly retryable: boolean;
|
|
9
|
+
constructor(params: {
|
|
10
|
+
message: string;
|
|
11
|
+
code: AIErrorCode;
|
|
12
|
+
provider?: ProviderId;
|
|
13
|
+
model?: string;
|
|
14
|
+
status?: number;
|
|
15
|
+
retryable?: boolean;
|
|
16
|
+
cause?: unknown;
|
|
17
|
+
});
|
|
18
|
+
}
|
|
19
|
+
export declare class AllModelsExhaustedError extends AIError {
|
|
20
|
+
readonly attempts: AttemptRecord[];
|
|
21
|
+
constructor(provider: ProviderId, attempts: AttemptRecord[]);
|
|
22
|
+
}
|
|
23
|
+
export declare class AllProvidersExhaustedError extends AIError {
|
|
24
|
+
readonly attempts: AttemptRecord[];
|
|
25
|
+
constructor(attempts: AttemptRecord[]);
|
|
26
|
+
}
|
|
27
|
+
export declare function toAIError(input: {
|
|
28
|
+
provider: ProviderId;
|
|
29
|
+
model?: string;
|
|
30
|
+
status?: number;
|
|
31
|
+
message: string;
|
|
32
|
+
cause?: unknown;
|
|
33
|
+
}): AIError;
|
package/dist/errors.js
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
/**
 * Normalized error surfaced by every part of the library.
 * `code` categorizes the failure; `retryable` tells the router whether
 * falling back to another model/provider makes sense.
 */
export class AIError extends Error {
    code;
    provider;
    model;
    status;
    retryable;
    cause;
    constructor(params) {
        super(params.message);
        this.name = "AIError";
        this.code = params.code;
        this.provider = params.provider;
        this.model = params.model;
        this.status = params.status;
        // Default to non-retryable: callers must opt in to fallback.
        this.retryable = params.retryable ?? false;
        if (params.cause !== undefined) {
            this.cause = params.cause;
        }
    }
}
/** Raised in manual mode when every model of the chosen provider failed. */
export class AllModelsExhaustedError extends AIError {
    attempts;
    constructor(provider, attempts) {
        const details = formatAttempts(attempts);
        super({
            message: details
                ? `All models are exhausted for provider ${provider}. Attempts: ${details}`
                : `All models are exhausted for provider ${provider}`,
            code: "all_models_exhausted",
            provider,
            retryable: false,
        });
        this.name = "AllModelsExhaustedError";
        this.attempts = attempts;
    }
}
/** Raised in auto mode when no provider produced a response. */
export class AllProvidersExhaustedError extends AIError {
    attempts;
    constructor(attempts) {
        const details = formatAttempts(attempts);
        super({
            message: details
                ? `All providers are exhausted or unavailable. Attempts: ${details}`
                : "All providers are exhausted or unavailable",
            code: "all_providers_exhausted",
            retryable: false,
        });
        this.name = "AllProvidersExhaustedError";
        this.attempts = attempts;
    }
}
/** Render attempts as "provider/model:code:message | ..." for error text. */
function formatAttempts(attempts) {
    return attempts
        .map((attempt) => `${attempt.provider}/${attempt.model ?? "n/a"}:${attempt.code}:${attempt.message}`)
        .join(" | ");
}
/**
 * Map a raw HTTP failure onto an AIError with a normalized code.
 * 401/403 -> auth, 408 -> timeout, 429 -> rate_limit, 400/404/422 ->
 * invalid_request, 5xx -> provider_unavailable, anything else -> unknown.
 * Only timeouts, rate limits, and 5xx responses are marked retryable.
 */
export function toAIError(input) {
    const { provider, model, status, message, cause } = input;
    if (status === 401 || status === 403) {
        return new AIError({
            message,
            code: "auth",
            provider,
            model,
            status,
            retryable: false,
            cause,
        });
    }
    // 408 Request Timeout: the provider gave up waiting; previously this fell
    // through to "unknown" (non-retryable) and blocked fallback.
    if (status === 408) {
        return new AIError({
            message,
            code: "timeout",
            provider,
            model,
            status,
            retryable: true,
            cause,
        });
    }
    if (status === 429) {
        return new AIError({
            message,
            code: "rate_limit",
            provider,
            model,
            status,
            retryable: true,
            cause,
        });
    }
    if (status === 400 || status === 404 || status === 422) {
        return new AIError({
            message,
            code: "invalid_request",
            provider,
            model,
            status,
            retryable: false,
            cause,
        });
    }
    if (status !== undefined && status >= 500) {
        return new AIError({
            message,
            code: "provider_unavailable",
            provider,
            model,
            status,
            retryable: true,
            cause,
        });
    }
    return new AIError({
        message,
        code: "unknown",
        provider,
        model,
        status,
        retryable: false,
        cause,
    });
}
|
package/dist/http.d.ts
ADDED
package/dist/http.js
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
import { AIError, toAIError } from "./errors.js";
|
|
2
|
+
/**
 * POST a JSON body to a provider endpoint and return the parsed response.
 * Every failure path is normalized into an AIError: HTTP errors via
 * toAIError, aborted requests as "timeout", everything else as "network".
 * A hard deadline of params.timeoutMs is enforced with an AbortController.
 */
export async function postJson(params) {
    const controller = new AbortController();
    // Aborting the controller rejects the in-flight fetch with an AbortError.
    const timeout = setTimeout(() => controller.abort(), params.timeoutMs);
    try {
        const response = await fetch(params.url, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                // Caller-supplied headers (auth, API versions) may extend or
                // override the defaults.
                ...params.headers,
            },
            body: JSON.stringify(params.body),
            signal: controller.signal,
        });
        // Read as text first so non-JSON error bodies can still be surfaced.
        const text = await response.text();
        const parsed = safeJsonParse(text);
        if (!response.ok) {
            const message = extractErrorMessage(parsed) ?? response.statusText;
            // toAIError maps the HTTP status onto a normalized error code.
            throw toAIError({
                provider: params.provider,
                model: params.model,
                status: response.status,
                message,
            });
        }
        return parsed;
    }
    catch (error) {
        // Already normalized (thrown above): pass through untouched.
        if (error instanceof AIError) {
            throw error;
        }
        // fetch rejects with an AbortError when our timeout fired.
        if (error instanceof Error && error.name === "AbortError") {
            throw new AIError({
                message: "Request timeout",
                code: "timeout",
                provider: params.provider,
                model: params.model,
                retryable: true,
                cause: error,
            });
        }
        // Anything else: DNS failure, refused connection, TLS error, etc.
        const networkMessage = error instanceof Error && error.message
            ? `Network or unexpected error: ${error.message}`
            : "Network or unexpected error";
        throw new AIError({
            message: networkMessage,
            code: "network",
            provider: params.provider,
            model: params.model,
            retryable: true,
            cause: error,
        });
    }
    finally {
        // Always release the timer, on success and on every failure path.
        clearTimeout(timeout);
    }
}
|
|
58
|
+
/** Parse a string as JSON if possible; otherwise return the raw string. */
function safeJsonParse(value) {
    let parsed;
    try {
        parsed = JSON.parse(value);
    }
    catch {
        parsed = value;
    }
    return parsed;
}
/**
 * Pull a human-readable message out of a provider error payload.
 * Understands `{ error: "..." }`, `{ error: { message: "..." } }`, and
 * `{ message: "..." }`; returns undefined for any other shape.
 */
function extractErrorMessage(value) {
    if (typeof value !== "object" || value === null) {
        return undefined;
    }
    const errorField = value.error;
    if (typeof errorField === "string") {
        return errorField;
    }
    if (errorField !== null &&
        typeof errorField === "object" &&
        typeof errorField.message === "string") {
        return errorField.message;
    }
    if (typeof value.message === "string") {
        return value.message;
    }
    return undefined;
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { AIMessageResponse, ProviderAdapter, ProviderConfig } from "../types.js";
|
|
2
|
+
export declare class ClaudeAdapter implements ProviderAdapter {
|
|
3
|
+
private readonly config;
|
|
4
|
+
readonly id: "claude";
|
|
5
|
+
constructor(config: ProviderConfig);
|
|
6
|
+
isConfigured(): boolean;
|
|
7
|
+
models(): string[];
|
|
8
|
+
sendMessage(input: {
|
|
9
|
+
message: string;
|
|
10
|
+
model: string;
|
|
11
|
+
timeoutMs: number;
|
|
12
|
+
}): Promise<AIMessageResponse>;
|
|
13
|
+
}
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import { AIError } from "../errors.js";
|
|
2
|
+
import { postJson } from "../http.js";
|
|
3
|
+
/** Adapter for the Anthropic Messages API. */
export class ClaudeAdapter {
    config;
    id = "claude";
    constructor(config) {
        this.config = config;
    }
    /** Usable only when enabled with both an API key and a base URL. */
    isConfigured() {
        const { enabled, apiKey, baseUrl } = this.config;
        return enabled && Boolean(apiKey) && Boolean(baseUrl);
    }
    /** Models to try, in priority order. */
    models() {
        return this.config.models;
    }
    /**
     * Send one user message and return the first text block of the reply.
     * Throws AIError "configuration" without a key, or "provider_unavailable"
     * (retryable) when the response carries no text content.
     */
    async sendMessage(input) {
        const { apiKey, baseUrl } = this.config;
        if (!apiKey) {
            throw new AIError({
                message: "Missing ANTHROPIC_API_KEY",
                code: "configuration",
                provider: this.id,
            });
        }
        const payload = {
            model: input.model,
            max_tokens: 1024,
            messages: [{ role: "user", content: input.message }],
        };
        const data = (await postJson({
            provider: this.id,
            model: input.model,
            timeoutMs: input.timeoutMs,
            url: `${baseUrl}/v1/messages`,
            headers: {
                "x-api-key": apiKey,
                "anthropic-version": "2023-06-01",
            },
            body: payload,
        }));
        // Claude replies with a list of content blocks; take the first text one.
        const textBlock = data.content?.find((entry) => entry.type === "text");
        const content = textBlock?.text;
        if (content) {
            return { provider: this.id, model: input.model, content };
        }
        throw new AIError({
            message: "Claude response does not include text content",
            code: "provider_unavailable",
            provider: this.id,
            model: input.model,
            retryable: true,
        });
    }
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { AIMessageResponse, ProviderAdapter, ProviderConfig } from "../types.js";
|
|
2
|
+
export declare class GeminiAdapter implements ProviderAdapter {
|
|
3
|
+
private readonly config;
|
|
4
|
+
readonly id: "gemini";
|
|
5
|
+
constructor(config: ProviderConfig);
|
|
6
|
+
isConfigured(): boolean;
|
|
7
|
+
models(): string[];
|
|
8
|
+
sendMessage(input: {
|
|
9
|
+
message: string;
|
|
10
|
+
model: string;
|
|
11
|
+
timeoutMs: number;
|
|
12
|
+
}): Promise<AIMessageResponse>;
|
|
13
|
+
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { AIError } from "../errors.js";
|
|
2
|
+
import { postJson } from "../http.js";
|
|
3
|
+
/** Adapter for the Google Gemini generateContent API. */
export class GeminiAdapter {
    config;
    id = "gemini";
    constructor(config) {
        this.config = config;
    }
    /** Usable only when enabled with both an API key and a base URL. */
    isConfigured() {
        return (this.config.enabled &&
            Boolean(this.config.apiKey) &&
            Boolean(this.config.baseUrl));
    }
    /** Models to try, in priority order. */
    models() {
        return this.config.models;
    }
    /**
     * Send one user message and return the first candidate's first text part.
     * Throws AIError "configuration" without a key, or "provider_unavailable"
     * (retryable) when the response carries no text.
     */
    async sendMessage(input) {
        if (!this.config.apiKey) {
            throw new AIError({
                message: "Missing GEMINI_API_KEY",
                code: "configuration",
                provider: this.id,
            });
        }
        const baseUrl = this.config.baseUrl.replace(/\/$/, "");
        const data = (await postJson({
            provider: this.id,
            model: input.model,
            timeoutMs: input.timeoutMs,
            // Authenticate via the x-goog-api-key header instead of a `?key=`
            // query parameter so the API key does not leak into request logs,
            // proxies, or error messages containing the URL.
            url: `${baseUrl}/v1beta/models/${encodeURIComponent(input.model)}:generateContent`,
            headers: {
                "x-goog-api-key": this.config.apiKey,
            },
            body: {
                contents: [{ role: "user", parts: [{ text: input.message }] }],
            },
        }));
        const content = data.candidates?.[0]?.content?.parts?.[0]?.text;
        if (!content) {
            throw new AIError({
                message: "Gemini response does not include message content",
                code: "provider_unavailable",
                provider: this.id,
                model: input.model,
                retryable: true,
            });
        }
        return {
            provider: this.id,
            model: input.model,
            content,
        };
    }
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { AIMessageResponse, ProviderAdapter, ProviderConfig, ProviderId } from "../types.js";
|
|
2
|
+
export declare class OllamaAdapter implements ProviderAdapter {
|
|
3
|
+
private readonly config;
|
|
4
|
+
readonly id: ProviderId;
|
|
5
|
+
constructor(id: "ollamaCloud" | "ollamaLocal", config: ProviderConfig);
|
|
6
|
+
isConfigured(): boolean;
|
|
7
|
+
models(): string[];
|
|
8
|
+
sendMessage(input: {
|
|
9
|
+
message: string;
|
|
10
|
+
model: string;
|
|
11
|
+
timeoutMs: number;
|
|
12
|
+
}): Promise<AIMessageResponse>;
|
|
13
|
+
}
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import { AIError } from "../errors.js";
|
|
2
|
+
import { postJson } from "../http.js";
|
|
3
|
+
/**
 * Adapter for Ollama's /api/chat endpoint. The same class serves both the
 * "ollamaCloud" and "ollamaLocal" provider ids, chosen at construction.
 */
export class OllamaAdapter {
    config;
    id;
    constructor(id, config) {
        this.config = config;
        this.id = id;
    }
    /** Ollama only needs a base URL; the API key is optional. */
    isConfigured() {
        return this.config.enabled && Boolean(this.config.baseUrl);
    }
    /** Models to try, in priority order. */
    models() {
        return this.config.models;
    }
    /** Send one user message through the chat endpoint (non-streaming). */
    async sendMessage(input) {
        const headers = {};
        const { apiKey } = this.config;
        if (apiKey) {
            // Cloud deployments authenticate with a bearer token; local ones don't.
            headers.Authorization = `Bearer ${apiKey}`;
        }
        const data = (await postJson({
            provider: this.id,
            model: input.model,
            timeoutMs: input.timeoutMs,
            url: buildOllamaChatUrl(this.config.baseUrl),
            headers,
            body: {
                model: input.model,
                messages: [{ role: "user", content: input.message }],
                stream: false,
            },
        }));
        const content = data.message?.content;
        if (content) {
            return { provider: this.id, model: input.model, content };
        }
        throw new AIError({
            message: "Ollama response does not include message content",
            code: "provider_unavailable",
            provider: this.id,
            model: input.model,
            retryable: true,
        });
    }
}
|
|
50
|
+
/**
 * Build the chat endpoint URL from a base URL, tolerating a trailing slash
 * and a base that already ends in "/api".
 */
function buildOllamaChatUrl(baseUrl) {
    const trimmed = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
    const suffix = trimmed.endsWith("/api") ? "/chat" : "/api/chat";
    return trimmed + suffix;
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { AIMessageResponse, ProviderAdapter, ProviderConfig } from "../types.js";
|
|
2
|
+
export declare class OpenAIAdapter implements ProviderAdapter {
|
|
3
|
+
private readonly config;
|
|
4
|
+
readonly id: "openai";
|
|
5
|
+
constructor(config: ProviderConfig);
|
|
6
|
+
isConfigured(): boolean;
|
|
7
|
+
models(): string[];
|
|
8
|
+
sendMessage(input: {
|
|
9
|
+
message: string;
|
|
10
|
+
model: string;
|
|
11
|
+
timeoutMs: number;
|
|
12
|
+
}): Promise<AIMessageResponse>;
|
|
13
|
+
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import { AIError } from "../errors.js";
|
|
2
|
+
import { postJson } from "../http.js";
|
|
3
|
+
/** Adapter for the OpenAI chat completions API. */
export class OpenAIAdapter {
    config;
    id = "openai";
    constructor(config) {
        this.config = config;
    }
    /** Usable only when enabled with both an API key and a base URL. */
    isConfigured() {
        const { enabled, apiKey, baseUrl } = this.config;
        return enabled && Boolean(apiKey) && Boolean(baseUrl);
    }
    /** Models to try, in priority order. */
    models() {
        return this.config.models;
    }
    /**
     * Send one user message and return the first choice's text.
     * Throws AIError "configuration" without a key, or "provider_unavailable"
     * (retryable) when the response carries no text content.
     */
    async sendMessage(input) {
        const { apiKey, baseUrl } = this.config;
        if (!apiKey) {
            throw new AIError({
                message: "Missing OPENAI_API_KEY",
                code: "configuration",
                provider: this.id,
            });
        }
        const payload = {
            model: input.model,
            messages: [{ role: "user", content: input.message }],
        };
        const data = (await postJson({
            provider: this.id,
            model: input.model,
            timeoutMs: input.timeoutMs,
            url: `${baseUrl}/v1/chat/completions`,
            headers: {
                Authorization: `Bearer ${apiKey}`,
            },
            body: payload,
        }));
        const content = data.choices?.[0]?.message?.content;
        if (content) {
            return { provider: this.id, model: input.model, content };
        }
        throw new AIError({
            message: "OpenAI response does not include message content",
            code: "provider_unavailable",
            provider: this.id,
            model: input.model,
            retryable: true,
        });
    }
}
|
package/dist/router.d.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { AIMessageRequest, AIMessageResponse, LibraryConfig, ProviderAdapter, ProviderId } from "./types.js";
|
|
2
|
+
export declare class AIRouter {
|
|
3
|
+
private readonly config;
|
|
4
|
+
private readonly adapters;
|
|
5
|
+
constructor(config: LibraryConfig, adapters: Record<ProviderId, ProviderAdapter>);
|
|
6
|
+
sendMessage(request: AIMessageRequest): Promise<AIMessageResponse>;
|
|
7
|
+
private sendManual;
|
|
8
|
+
private sendAuto;
|
|
9
|
+
private getModelOrder;
|
|
10
|
+
}
|
package/dist/router.js
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
import { AIError, AllModelsExhaustedError, AllProvidersExhaustedError, } from "./errors.js";
|
|
2
|
+
/**
 * Routes a message to provider adapters, with fallback:
 *  - manual mode: one chosen provider; on rate limit, tries its other models.
 *  - auto mode: walks providerOrder, trying every model of every configured
 *    provider until one succeeds.
 */
export class AIRouter {
    config;
    adapters;
    constructor(config, adapters) {
        this.config = config;
        this.adapters = adapters;
    }
    /**
     * Entry point. Validates the message, then dispatches by mode
     * (defaulting to "auto" when none is given).
     * @throws AIError "invalid_request" for a missing/blank message.
     */
    async sendMessage(request) {
        const mode = request.mode ?? "auto";
        if (!request.message?.trim()) {
            throw new AIError({
                message: "message is required",
                code: "invalid_request",
            });
        }
        if (mode === "manual") {
            console.info("[aiLib] mode=manual");
            return this.sendManual(request);
        }
        console.info("[aiLib] mode=auto");
        return this.sendAuto(request);
    }
    /**
     * Manual mode: use exactly the requested provider. The requested model is
     * tried first; ONLY a rate_limit error moves on to the provider's other
     * models — any other failure is rethrown immediately.
     * @throws AllModelsExhaustedError when every candidate model was rate-limited.
     */
    async sendManual(request) {
        if (!request.provider) {
            throw new AIError({
                message: "provider is required in manual mode",
                code: "invalid_request",
            });
        }
        const providerId = request.provider;
        const adapter = this.adapters[providerId];
        const attempts = [];
        if (!adapter || !adapter.isConfigured()) {
            throw new AIError({
                message: `Provider ${providerId} is not configured`,
                code: "configuration",
                provider: providerId,
            });
        }
        // Requested model first, then the provider's remaining models.
        const candidateModels = this.getModelOrder(adapter.models(), request.model);
        for (const model of candidateModels) {
            try {
                console.info(`[aiLib] trying provider=${providerId} model=${model} (manual)`);
                const response = await adapter.sendMessage({
                    message: request.message,
                    model,
                    // Per-request timeout wins over the library default.
                    timeoutMs: request.timeoutMs ?? this.config.timeoutMs,
                });
                console.info(`[aiLib] using provider=${response.provider} model=${response.model}`);
                return response;
            }
            catch (error) {
                const aiError = normalizeError(error, providerId, model);
                attempts.push({
                    provider: providerId,
                    model,
                    code: aiError.code,
                    message: aiError.message,
                });
                if (aiError.code === "rate_limit") {
                    console.warn(`[aiLib] rate limit on provider=${providerId} model=${model}, trying next model: ${aiError.message}`);
                    continue;
                }
                // Non-rate-limit failures are surfaced to the caller unchanged.
                console.warn(`[aiLib] provider=${providerId} model=${model} failed with code=${aiError.code}: ${aiError.message}`);
                throw aiError;
            }
        }
        throw new AllModelsExhaustedError(providerId, attempts);
    }
    /**
     * Auto mode: walk providerOrder, trying each configured provider's models
     * in order. Retryable errors (and rate limits) fall through to the next
     * model/provider; a non-retryable error skips the rest of that provider.
     * @throws AllProvidersExhaustedError when nothing succeeded.
     */
    async sendAuto(request) {
        const attempts = [];
        for (const providerId of this.config.providerOrder) {
            const adapter = this.adapters[providerId];
            if (!adapter || !adapter.isConfigured()) {
                console.info(`[aiLib] skipping provider=${providerId} (not configured)`);
                continue;
            }
            for (const model of adapter.models()) {
                try {
                    console.info(`[aiLib] trying provider=${providerId} model=${model} (auto)`);
                    const response = await adapter.sendMessage({
                        message: request.message,
                        model,
                        timeoutMs: request.timeoutMs ?? this.config.timeoutMs,
                    });
                    console.info(`[aiLib] using provider=${response.provider} model=${response.model}`);
                    return response;
                }
                catch (error) {
                    const aiError = normalizeError(error, providerId, model);
                    attempts.push({
                        provider: providerId,
                        model,
                        code: aiError.code,
                        message: aiError.message,
                    });
                    if (aiError.code === "rate_limit" || aiError.retryable) {
                        console.warn(`[aiLib] fallback from provider=${providerId} model=${model} due to code=${aiError.code}: ${aiError.message}`);
                        continue;
                    }
                    // Non-retryable (e.g. auth/config): no point trying this
                    // provider's other models — move to the next provider.
                    console.warn(`[aiLib] stopping provider=${providerId} due to non-retryable code=${aiError.code}: ${aiError.message}`);
                    break;
                }
            }
        }
        if (attempts.length > 0) {
            const attemptSummary = attempts
                .map((attempt) => `${attempt.provider}/${attempt.model ?? "n/a"}:${attempt.code}:${attempt.message}`)
                .join(" | ");
            console.error(`[aiLib] all providers exhausted. attempts=${attemptSummary}`);
        }
        throw new AllProvidersExhaustedError(attempts);
    }
    /**
     * Order candidate models: the selected one first (even if it is not in the
     * configured list), then the rest, with the selected model de-duplicated.
     */
    getModelOrder(models, selected) {
        if (!selected) {
            return models;
        }
        const unique = [selected, ...models.filter((model) => model !== selected)];
        return unique;
    }
}
|
|
123
|
+
/**
 * Coerce anything thrown by an adapter into an AIError with code "unknown"
 * (non-retryable), tagging it with the provider/model that was being tried.
 * Existing AIError instances pass through unchanged.
 */
function normalizeError(error, provider, model) {
    if (error instanceof AIError) {
        return error;
    }
    const message = error instanceof Error ? error.message : "Unknown error";
    return new AIError({
        message,
        code: "unknown",
        provider,
        model,
        retryable: false,
        cause: error,
    });
}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
export type ProviderId = "openai" | "gemini" | "claude" | "ollamaCloud" | "ollamaLocal";
|
|
2
|
+
export type RoutingMode = "auto" | "manual";
|
|
3
|
+
export interface AIMessageRequest {
|
|
4
|
+
message: string;
|
|
5
|
+
mode?: RoutingMode;
|
|
6
|
+
provider?: ProviderId;
|
|
7
|
+
model?: string;
|
|
8
|
+
timeoutMs?: number;
|
|
9
|
+
}
|
|
10
|
+
export interface AIMessageResponse {
|
|
11
|
+
provider: ProviderId;
|
|
12
|
+
model: string;
|
|
13
|
+
content: string;
|
|
14
|
+
}
|
|
15
|
+
export interface AttemptRecord {
|
|
16
|
+
provider: ProviderId;
|
|
17
|
+
model?: string;
|
|
18
|
+
code: string;
|
|
19
|
+
message: string;
|
|
20
|
+
}
|
|
21
|
+
export interface ProviderConfig {
|
|
22
|
+
enabled: boolean;
|
|
23
|
+
baseUrl: string;
|
|
24
|
+
apiKey?: string;
|
|
25
|
+
models: string[];
|
|
26
|
+
}
|
|
27
|
+
export interface LibraryConfig {
|
|
28
|
+
providerOrder: ProviderId[];
|
|
29
|
+
timeoutMs: number;
|
|
30
|
+
providers: Record<ProviderId, ProviderConfig>;
|
|
31
|
+
}
|
|
32
|
+
export interface ProviderAdapter {
|
|
33
|
+
id: ProviderId;
|
|
34
|
+
isConfigured(): boolean;
|
|
35
|
+
models(): string[];
|
|
36
|
+
sendMessage(input: {
|
|
37
|
+
message: string;
|
|
38
|
+
model: string;
|
|
39
|
+
timeoutMs: number;
|
|
40
|
+
}): Promise<AIMessageResponse>;
|
|
41
|
+
}
|
package/dist/types.js
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
package/package.json
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "ailib-router",
|
|
3
|
+
"version": "0.0.2",
|
|
4
|
+
"description": "Multi-provider AI chat library with auto/manual routing and fallback",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/index.js",
|
|
7
|
+
"types": "dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"import": "./dist/index.js",
|
|
11
|
+
"types": "./dist/index.d.ts"
|
|
12
|
+
}
|
|
13
|
+
},
|
|
14
|
+
"files": [
|
|
15
|
+
"dist"
|
|
16
|
+
],
|
|
17
|
+
"license": "MIT",
|
|
18
|
+
"repository": {
|
|
19
|
+
"type": "git",
|
|
20
|
+
"url": "https://github.com/pigos4/Ai-Lib.git"
|
|
21
|
+
},
|
|
22
|
+
"scripts": {
|
|
23
|
+
"build": "tsc -p tsconfig.json",
|
|
24
|
+
"test": "vitest run",
|
|
25
|
+
"test:watch": "vitest",
|
|
26
|
+
"typecheck": "tsc -p tsconfig.json --noEmit"
|
|
27
|
+
},
|
|
28
|
+
"keywords": [
|
|
29
|
+
"ai",
|
|
30
|
+
"openai",
|
|
31
|
+
"gemini",
|
|
32
|
+
"claude",
|
|
33
|
+
"ollama",
|
|
34
|
+
"router"
|
|
35
|
+
],
|
|
36
|
+
"engines": {
|
|
37
|
+
"node": ">=18"
|
|
38
|
+
},
|
|
39
|
+
"devDependencies": {
|
|
40
|
+
"@types/node": "^24.0.0",
|
|
41
|
+
"typescript": "^5.8.3",
|
|
42
|
+
"vitest": "^3.1.2"
|
|
43
|
+
}
|
|
44
|
+
}
|