@layer-ai/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +13 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +15 -0
- package/dist/lib/db/postgres.d.ts +27 -0
- package/dist/lib/db/postgres.d.ts.map +1 -0
- package/dist/lib/db/postgres.js +168 -0
- package/dist/lib/db/redis.d.ts +12 -0
- package/dist/lib/db/redis.d.ts.map +1 -0
- package/dist/lib/db/redis.js +95 -0
- package/dist/middleware/auth.d.ts +22 -0
- package/dist/middleware/auth.d.ts.map +1 -0
- package/dist/middleware/auth.js +89 -0
- package/dist/routes/auth.d.ts +4 -0
- package/dist/routes/auth.d.ts.map +1 -0
- package/dist/routes/auth.js +82 -0
- package/dist/routes/complete.d.ts +4 -0
- package/dist/routes/complete.d.ts.map +1 -0
- package/dist/routes/complete.js +223 -0
- package/dist/routes/gates.d.ts +4 -0
- package/dist/routes/gates.d.ts.map +1 -0
- package/dist/routes/gates.js +262 -0
- package/dist/routes/keys.d.ts +4 -0
- package/dist/routes/keys.d.ts.map +1 -0
- package/dist/routes/keys.js +70 -0
- package/dist/routes/logs.d.ts +4 -0
- package/dist/routes/logs.d.ts.map +1 -0
- package/dist/routes/logs.js +120 -0
- package/dist/services/providers/anthropic.d.ts +18 -0
- package/dist/services/providers/anthropic.d.ts.map +1 -0
- package/dist/services/providers/anthropic.js +55 -0
- package/dist/services/providers/base-adapter.d.ts +32 -0
- package/dist/services/providers/base-adapter.d.ts.map +1 -0
- package/dist/services/providers/base-adapter.js +89 -0
- package/dist/services/providers/google.d.ts +18 -0
- package/dist/services/providers/google.d.ts.map +1 -0
- package/dist/services/providers/google.js +39 -0
- package/dist/services/providers/openai-adapter.d.ts +19 -0
- package/dist/services/providers/openai-adapter.d.ts.map +1 -0
- package/dist/services/providers/openai-adapter.js +240 -0
- package/dist/services/providers/openai.d.ts +17 -0
- package/dist/services/providers/openai.d.ts.map +1 -0
- package/dist/services/providers/openai.js +43 -0
- package/dist/services/providers/test-openai-adapter.d.ts +2 -0
- package/dist/services/providers/test-openai-adapter.d.ts.map +1 -0
- package/dist/services/providers/test-openai-adapter.js +118 -0
- package/dist/services/task-analysis.d.ts +7 -0
- package/dist/services/task-analysis.d.ts.map +1 -0
- package/dist/services/task-analysis.js +74 -0
- package/package.json +53 -0
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
import { MODEL_REGISTRY } from '@layer-ai/sdk';
|
|
3
|
+
// Lazy-initialize OpenAI client
|
|
4
|
+
// Module-level cache: the OpenAI client is constructed at most once per process.
let openai = null;

/**
 * Returns the shared OpenAI client, creating it on first use.
 * Lazy construction defers reading OPENAI_API_KEY until the first call,
 * so merely importing this module never requires the env var to be set.
 */
function getOpenAIClient() {
    // Assign only when still null (??= skips assignment once initialized).
    openai ??= new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
    return openai;
}
|
|
13
|
+
/**
 * Calls the OpenAI chat-completions API and returns the text plus usage/cost.
 *
 * @param {Object} params
 * @param {string} params.model - OpenAI model id; should also be a key in MODEL_REGISTRY.
 * @param {Array<{role: string, content: string}>} params.messages - Conversation history.
 * @param {number} [params.temperature]
 * @param {number} [params.maxTokens] - Forwarded as OpenAI's `max_tokens`.
 * @param {number} [params.topP] - Forwarded as OpenAI's `top_p`.
 * @returns {Promise<{content: string, promptTokens: number, completionTokens: number, totalTokens: number, costUsd: number}>}
 *   `costUsd` is 0 when the model has no MODEL_REGISTRY pricing entry.
 */
export async function createCompletion(params) {
    // Call OpenAI API. Only role/content are forwarded; extra message fields are dropped.
    const response = await getOpenAIClient().chat.completions.create({
        model: params.model,
        messages: params.messages.map(({ role, content }) => ({ role, content })),
        temperature: params.temperature,
        max_tokens: params.maxTokens,
        top_p: params.topP,
    });

    // Extract response text; content can be null for e.g. refusals.
    const choice = response.choices[0];
    const content = choice?.message?.content ?? '';

    // Token usage (the usage object may be absent on some responses).
    const promptTokens = response.usage?.prompt_tokens ?? 0;
    const completionTokens = response.usage?.completion_tokens ?? 0;
    const totalTokens = response.usage?.total_tokens ?? 0;

    // Calculate cost. Guard the registry lookup: previously an unknown model
    // threw an opaque TypeError here even though the completion had succeeded.
    const pricing = MODEL_REGISTRY[params.model]?.pricing;
    if (!pricing) {
        console.warn(`No MODEL_REGISTRY pricing for model "${params.model}"; reporting costUsd = 0`);
    }
    const costUsd = pricing
        ? (promptTokens / 1000) * pricing.input + (completionTokens / 1000) * pricing.output
        : 0;

    return {
        content,
        promptTokens,
        completionTokens,
        totalTokens,
        costUsd,
    };
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"test-openai-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/test-openai-adapter.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
import { OpenAIAdapter } from './openai-adapter.js';
|
|
2
|
+
// Shared adapter instance used by every smoke test below.
const adapter = new OpenAIAdapter();

/** Smoke-tests a plain chat completion and prints content, usage, cost, and latency. */
async function testChatCompletion() {
    console.log('Testing chat completion...');
    const response = await adapter.call({
        gate: 'test-gate',
        model: 'gpt-4o-mini',
        type: 'chat',
        data: {
            messages: [{ role: 'user', content: 'Say "Hello World" and nothing else.' }],
            temperature: 0.7,
            maxTokens: 10,
        },
    });
    console.log('Response:', response.content);
    console.log('Tokens:', response.usage);
    console.log('Cost:', response.cost);
    console.log('Latency:', `${response.latencyMs}ms`);
    console.log('Finish reason:', response.finishReason);
    console.log('✅ Chat completion test passed\n');
}
|
|
25
|
+
/** Smoke-tests multimodal chat by asking the model about a remote image. */
async function testChatWithVision() {
    console.log('Testing chat with vision...');
    const imageUrl = 'https://images.unsplash.com/photo-1765202659641-9ad9facfe5cf?q=80&w=1364&auto=format&fit=crop&ixlib=rb-4.1.0&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D';
    const response = await adapter.call({
        gate: 'test-gate',
        model: 'gpt-4o-mini',
        type: 'chat',
        data: {
            messages: [
                {
                    role: 'user',
                    content: 'What color is the sky in this image?',
                    images: [{ url: imageUrl, detail: 'high' }],
                },
            ],
            maxTokens: 50,
        },
    });
    console.log('Response:', response.content);
    console.log('Finish reason:', response.finishReason);
    console.log('✅ Vision test passed\n');
}
|
|
50
|
+
/** Smoke-tests DALL-E image generation and prints the resulting URL and revised prompt. */
async function testImageGeneration() {
    console.log('Testing image generation...');
    const request = {
        gate: 'test-gate',
        model: 'dall-e-3',
        type: 'image',
        data: {
            prompt: 'A cute cat playing with a ball of yarn',
            size: '1024x1024',
            quality: 'standard',
            count: 1,
        },
    };
    const { images } = await adapter.call(request);
    console.log('Generated images:', images?.length);
    console.log('Image URL:', images?.[0]?.url);
    console.log('Revised prompt:', images?.[0]?.revisedPrompt);
    console.log('✅ Image generation test passed\n');
}
|
|
69
|
+
/** Smoke-tests the embeddings endpoint and prints vector size, usage, and cost. */
async function testEmbeddings() {
    console.log('Testing embeddings...');
    const response = await adapter.call({
        gate: 'test-gate',
        model: 'text-embedding-3-small',
        type: 'embeddings',
        data: { input: 'Hello world' },
    });
    console.log('Embeddings dimensions:', response.embeddings?.[0]?.length);
    console.log('Tokens:', response.usage);
    console.log('Cost:', response.cost);
    console.log('✅ Embeddings test passed\n');
}
|
|
85
|
+
/** Smoke-tests text-to-speech and prints the returned audio format and payload size. */
async function testTextToSpeech() {
    console.log('Testing text-to-speech...');
    const response = await adapter.call({
        gate: 'test-gate',
        model: 'tts-1',
        type: 'tts',
        data: {
            input: 'Hello, this is a test.',
            voice: 'alloy',
            speed: 1.0,
            responseFormat: 'mp3',
        },
    });
    console.log('Audio format:', response.audio?.format);
    console.log('Audio base64 length:', response.audio?.base64?.length);
    console.log('✅ Text-to-speech test passed\n');
}
|
|
103
|
+
/**
 * Runs every adapter smoke test sequentially (so console output stays grouped
 * per test) and exits with a non-zero status on the first failure.
 * Each test prints its own "Testing ..." banner, so no extra announcements here
 * (a stray duplicate "Testing vision..." log was removed).
 */
async function runTests() {
    try {
        await testChatCompletion();
        await testChatWithVision();
        await testImageGeneration();
        await testEmbeddings();
        await testTextToSpeech();
        console.log('✅ All tests passed!');
    }
    catch (error) {
        console.error('❌ Test failed:', error);
        process.exit(1);
    }
}
runTests();
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import { TaskAnalysis } from '@layer-ai/sdk';
/**
 * Recommends models from the registry for a natural-language task description.
 * Each preference weight ranges 0.0 (doesn't matter) to 1.0 (very important);
 * omitted weights default to 0.33 in the implementation.
 * NOTE(review): generated declaration file — regenerate via `tsc` rather than
 * editing by hand if the source signature changes.
 */
export declare function analyzeTask(description: string, userPreferences?: {
    costWeight?: number;
    latencyWeight?: number;
    qualityWeight?: number;
}): Promise<TaskAnalysis>;
//# sourceMappingURL=task-analysis.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"task-analysis.d.ts","sourceRoot":"","sources":["../../src/services/task-analysis.ts"],"names":[],"mappings":"AACA,OAAO,EAAkB,YAAY,EAAE,MAAM,eAAe,CAAC;AAE7D,wBAAsB,WAAW,CAC/B,WAAW,EAAE,MAAM,EACnB,eAAe,CAAC,EAAE;IAChB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,aAAa,CAAC,EAAE,MAAM,CAAC;CACxB,GACA,OAAO,CAAC,YAAY,CAAC,CA4EvB"}
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import { MODEL_REGISTRY } from '@layer-ai/sdk';
|
|
3
|
+
/**
 * Asks Claude to recommend the best models from MODEL_REGISTRY for a task.
 *
 * @param {string} description - Natural-language description of the task.
 * @param {{costWeight?: number, latencyWeight?: number, qualityWeight?: number}} [userPreferences]
 *   Importance weights in [0, 1]; each defaults to 0.33 when omitted.
 * @returns {Promise<{primary: string, alternatives: string[], reasoning: string}>}
 *   The parsed recommendation; on any failure (API error, unparseable or
 *   malformed reply) logs the error and returns hard-coded safe defaults.
 */
export async function analyzeTask(description, userPreferences) {
    const anthropic = new Anthropic({
        apiKey: process.env.ANTHROPIC_API_KEY
    });
    const registryContext = JSON.stringify(MODEL_REGISTRY, null, 2);
    const costWeight = userPreferences?.costWeight ?? 0.33;
    const latencyWeight = userPreferences?.latencyWeight ?? 0.33;
    const qualityWeight = userPreferences?.qualityWeight ?? 0.33;
    const prompt = `You are analyzing a task to recommend the best AI models from our registry.

MODEL REGISTRY (available models and their capabilities):
${registryContext}

TASK DESCRIPTION:
"${description}"

USER PREFERENCES (0.0 = doesn't care, 1.0 = very important):
- Cost importance: ${costWeight}
- Latency importance: ${latencyWeight}
- Quality importance: ${qualityWeight}

Analyze this task and recommend the BEST models from our registry that match BOTH the task requirements AND user preferences.

Consider:
- Math benchmarks (for quantitative tasks)
- Coding benchmarks (for programming tasks)
- Intelligence scores (for reasoning)
- Context window (if task needs long context)
- Pricing (balance quality vs cost)
- Performance (speed matters for some tasks)
- Capabilities (if a model is able to accurately perform task in the description)
- Model Modalities (what form does model input and output come in)

Return JSON with:
{
  "primary": "model-id", // Best overall choice
  "alternatives": ["id1", "id2"], // 2-3 other good options
  "reasoning": "why these models work for this task and user preferences"
}`;
    try {
        const response = await anthropic.messages.create({
            model: 'claude-haiku-4-5-20251001',
            max_tokens: 2000,
            temperature: 0.0, // deterministic-ish routing decisions
            messages: [{
                    role: 'user',
                    content: prompt
                }]
        });
        const responseContent = response.content[0];
        if (responseContent.type !== 'text') {
            throw new Error('Unexpected response type from Claude');
        }
        // Strip an optional markdown code fence (```json ... ```) around the JSON.
        let jsonText = responseContent.text.trim();
        if (jsonText.startsWith('```')) {
            jsonText = jsonText.replace(/^```(?:json)?\n?/, '').replace(/\n?```$/, '');
        }
        const mapping = JSON.parse(jsonText);
        // typeof null === 'object', so a reply of "null" previously slipped through
        // this validation; also require a primary model id to be present.
        if (mapping === null || typeof mapping !== 'object' || Array.isArray(mapping)
            || typeof mapping.primary !== 'string') {
            throw new Error('Mapping is in wrong format');
        }
        return mapping;
    }
    catch (error) {
        console.error('Failed to find accurate task requirements', error);
        return {
            primary: 'gpt-4o',
            alternatives: ['claude-sonnet-4-5-20250929', 'gemini-2.5-flash'],
            reasoning: 'Task analysis failed, returning safe defaults'
        };
    }
}
|
package/package.json
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@layer-ai/core",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Core API routes and services for Layer AI",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"import": "./dist/index.js",
|
|
11
|
+
"types": "./dist/index.d.ts"
|
|
12
|
+
}
|
|
13
|
+
},
|
|
14
|
+
"files": [
|
|
15
|
+
"dist"
|
|
16
|
+
],
|
|
17
|
+
"scripts": {
|
|
18
|
+
"build": "tsc",
|
|
19
|
+
"dev": "tsc --watch",
|
|
20
|
+
"clean": "rm -rf dist"
|
|
21
|
+
},
|
|
22
|
+
"author": "Micah Nettey",
|
|
23
|
+
"license": "MIT",
|
|
24
|
+
"repository": {
|
|
25
|
+
"type": "git",
|
|
26
|
+
"url": "https://github.com/yourusername/layer-ai.git",
|
|
27
|
+
"directory": "packages/core"
|
|
28
|
+
},
|
|
29
|
+
"bugs": {
|
|
30
|
+
"url": "https://github.com/yourusername/layer-ai/issues"
|
|
31
|
+
},
|
|
32
|
+
"homepage": "https://uselayer.ai",
|
|
33
|
+
"dependencies": {
|
|
34
|
+
"@anthropic-ai/sdk": "^0.39.0",
|
|
35
|
+
"@google/genai": "^1.30.0",
|
|
36
|
+
"@layer-ai/sdk": "workspace:*",
|
|
37
|
+
"bcryptjs": "^2.4.3",
|
|
38
|
+
"express": "^4.18.2",
|
|
39
|
+
"ioredis": "^5.3.2",
|
|
40
|
+
"jsonwebtoken": "^9.0.2",
|
|
41
|
+
"nanoid": "^5.0.4",
|
|
42
|
+
"openai": "^4.24.0",
|
|
43
|
+
"pg": "^8.11.3"
|
|
44
|
+
},
|
|
45
|
+
"devDependencies": {
|
|
46
|
+
"@types/express": "^4.17.21",
|
|
47
|
+
"@types/bcryptjs": "^2.4.6",
|
|
48
|
+
"@types/jsonwebtoken": "^9.0.5",
|
|
49
|
+
"@types/node": "^20.10.4",
|
|
50
|
+
"@types/pg": "^8.10.9",
|
|
51
|
+
"typescript": "^5.3.3"
|
|
52
|
+
}
|
|
53
|
+
}
|