@layer-ai/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +13 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +15 -0
- package/dist/lib/db/postgres.d.ts +27 -0
- package/dist/lib/db/postgres.d.ts.map +1 -0
- package/dist/lib/db/postgres.js +168 -0
- package/dist/lib/db/redis.d.ts +12 -0
- package/dist/lib/db/redis.d.ts.map +1 -0
- package/dist/lib/db/redis.js +95 -0
- package/dist/middleware/auth.d.ts +22 -0
- package/dist/middleware/auth.d.ts.map +1 -0
- package/dist/middleware/auth.js +89 -0
- package/dist/routes/auth.d.ts +4 -0
- package/dist/routes/auth.d.ts.map +1 -0
- package/dist/routes/auth.js +82 -0
- package/dist/routes/complete.d.ts +4 -0
- package/dist/routes/complete.d.ts.map +1 -0
- package/dist/routes/complete.js +223 -0
- package/dist/routes/gates.d.ts +4 -0
- package/dist/routes/gates.d.ts.map +1 -0
- package/dist/routes/gates.js +262 -0
- package/dist/routes/keys.d.ts +4 -0
- package/dist/routes/keys.d.ts.map +1 -0
- package/dist/routes/keys.js +70 -0
- package/dist/routes/logs.d.ts +4 -0
- package/dist/routes/logs.d.ts.map +1 -0
- package/dist/routes/logs.js +120 -0
- package/dist/services/providers/anthropic.d.ts +18 -0
- package/dist/services/providers/anthropic.d.ts.map +1 -0
- package/dist/services/providers/anthropic.js +55 -0
- package/dist/services/providers/base-adapter.d.ts +32 -0
- package/dist/services/providers/base-adapter.d.ts.map +1 -0
- package/dist/services/providers/base-adapter.js +89 -0
- package/dist/services/providers/google.d.ts +18 -0
- package/dist/services/providers/google.d.ts.map +1 -0
- package/dist/services/providers/google.js +39 -0
- package/dist/services/providers/openai-adapter.d.ts +19 -0
- package/dist/services/providers/openai-adapter.d.ts.map +1 -0
- package/dist/services/providers/openai-adapter.js +240 -0
- package/dist/services/providers/openai.d.ts +17 -0
- package/dist/services/providers/openai.d.ts.map +1 -0
- package/dist/services/providers/openai.js +43 -0
- package/dist/services/providers/test-openai-adapter.d.ts +2 -0
- package/dist/services/providers/test-openai-adapter.d.ts.map +1 -0
- package/dist/services/providers/test-openai-adapter.js +118 -0
- package/dist/services/task-analysis.d.ts +7 -0
- package/dist/services/task-analysis.d.ts.map +1 -0
- package/dist/services/task-analysis.js +74 -0
- package/package.json +53 -0
@@ -0,0 +1,120 @@
import { Router } from 'express';
import { db } from '../lib/db/postgres.js';
import { authenticate } from '../middleware/auth.js';
const router = Router();
// All routes require SDK authentication
router.use(authenticate);
// GET /v1/logs - List request logs
router.get('/', async (req, res) => {
    try {
        const userId = req.userId;
        const limit = parseInt(req.query.limit) || 50;
        const offset = parseInt(req.query.offset) || 0;
        const gate = req.query.gate;
        let query = `
            SELECT
                id,
                user_id,
                gate_id,
                gate_name,
                model_requested,
                model_used,
                prompt_tokens,
                completion_tokens,
                cost_usd,
                latency_ms,
                success,
                error_message,
                created_at as logged_at
            FROM requests
            WHERE user_id = $1
        `;
        const params = [userId];
        if (gate) {
            query += ` AND gate_name = $2`;
            params.push(gate);
        }
        query += ` ORDER BY created_at DESC LIMIT $${params.length + 1} OFFSET $${params.length + 2}`;
        params.push(limit, offset);
        const result = await db.query(query, params);
        const logs = result.rows.map((row) => ({
            id: row.id,
            userId: row.user_id,
            gateId: row.gate_id,
            gateName: row.gate_name,
            modelRequested: row.model_requested,
            modelUsed: row.model_used,
            promptTokens: row.prompt_tokens,
            completionTokens: row.completion_tokens,
            costUsd: parseFloat(row.cost_usd),
            latencyMs: row.latency_ms,
            success: row.success,
            errorMessage: row.error_message,
            loggedAt: row.logged_at,
        }));
        res.json(logs);
    }
    catch (error) {
        console.error('Logs list error:', error);
        res.status(500).json({ error: 'internal_error', message: 'Failed to fetch logs' });
    }
});
// GET /v1/logs/overview - Get logs for api calls
router.get('/overview', async (req, res) => {
    try {
        const userId = req.userId;
        const [statsResult, gatesResult, recentRequestsResult] = await Promise.all([
            // Get aggregate stats
            db.query(`SELECT
                COUNT(*) as total_requests,
                COALESCE(SUM(cost_usd), 0) as total_cost,
                COALESCE(AVG(latency_ms), 0) as avg_latency
            FROM requests
            WHERE user_id = $1`, [userId]),
            // Get gates count
            db.query(`SELECT COUNT(*) as active_gates FROM gates WHERE user_id = $1`, [userId]),
            // Get recent requests
            db.query(`SELECT
                id,
                gate_name,
                model_used,
                prompt_tokens,
                completion_tokens,
                total_tokens,
                cost_usd,
                latency_ms,
                success,
                created_at
            FROM requests
            WHERE user_id = $1
            ORDER BY created_at DESC
            LIMIT 20`, [userId]),
        ]);
        const stats = statsResult.rows[0];
        const gatesCount = gatesResult.rows[0];
        const recentRequests = recentRequestsResult.rows;
        res.json({
            totalRequests: parseInt(stats.total_requests),
            totalCost: parseFloat(stats.total_cost),
            avgLatency: Math.round(parseFloat(stats.avg_latency)),
            activeGates: parseInt(gatesCount.active_gates),
            recentRequests: recentRequests.map((req) => ({
                id: req.id,
                gateName: req.gate_name,
                model: req.model_used,
                promptTokens: req.prompt_tokens,
                completionTokens: req.completion_tokens,
                totalTokens: req.total_tokens,
                cost: parseFloat(req.cost_usd),
                latency: req.latency_ms,
                success: req.success,
                createdAt: req.created_at,
            })),
        });
    }
    catch (error) {
        console.error('Analytics overview error:', error);
        res.status(500).json({ error: 'internal_error', message: 'Failed to fetch analytics' });
    }
});
export default router;
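The route comments above ("GET /v1/logs", "GET /v1/logs/overview") imply the router is mounted under a /v1/logs prefix by whatever server consumes this package. A minimal mounting sketch, assuming a hypothetical Express entry point outside this diff (the import path and port are assumptions, not taken from the package):

// Hypothetical host app -- not part of the published package.
import express from 'express';
import logsRouter from './routes/logs.js'; // assumed path, relative to the dist/ folder shown above

const app = express();
app.use(express.json());

// Mount point inferred from the "GET /v1/logs" comments in routes/logs.js.
app.use('/v1/logs', logsRouter);

app.listen(3000, () => console.log('@layer-ai/core routes listening on :3000'));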
@@ -0,0 +1,18 @@
import type { Message } from '@layer-ai/sdk';
export interface AnthropicCompletionParams {
    model: string;
    messages: Message[];
    temperature?: number;
    maxTokens?: number;
    topP?: number;
    systemPrompt?: string;
}
export interface ProviderResponse {
    content: string;
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    costUsd: number;
}
export declare function createCompletion(params: AnthropicCompletionParams): Promise<ProviderResponse>;
//# sourceMappingURL=anthropic.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../../src/services/providers/anthropic.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,OAAO,EAAkB,MAAM,eAAe,CAAC;AAe7D,MAAM,WAAW,yBAAyB;IACxC,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,gBAAgB;IAC/B,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,CAAC;IACrB,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,wBAAsB,gBAAgB,CAAC,MAAM,EAAE,yBAAyB,GAAG,OAAO,CAAC,gBAAgB,CAAC,CAgDnG"}
@@ -0,0 +1,55 @@
import Anthropic from '@anthropic-ai/sdk/index';
import { MODEL_REGISTRY } from '@layer-ai/sdk';
// Lazy-initialize Anthropic client
let anthropic = null;
function getAnthropicClient() {
    if (!anthropic) {
        anthropic = new Anthropic({
            apiKey: process.env.ANTHROPIC_API_KEY,
        });
    }
    return anthropic;
}
export async function createCompletion(params) {
    // Separate system message from conversation
    let systemPrompt = params.systemPrompt || '';
    const conversationMessages = params.messages.filter(msg => {
        if (msg.role === 'system') {
            systemPrompt = msg.content;
            return false;
        }
        return true;
    });
    // map to anthropic format
    const anthropicMessages = conversationMessages.map(msg => ({
        role: msg.role,
        content: msg.content,
    }));
    // Call anthropic api
    const response = await getAnthropicClient().messages.create({
        model: params.model,
        max_tokens: params.maxTokens || 1024,
        messages: anthropicMessages,
        ...(systemPrompt && { system: systemPrompt }),
        ...(params.temperature != null && { temperature: params.temperature }),
        ...(params.topP != null && { top_p: params.topP }),
    });
    // Extract response content
    const content = response.content[0].type === 'text'
        ? response.content[0].text
        : '';
    // Get token usage
    const promptTokens = response.usage.input_tokens;
    const completionTokens = response.usage.output_tokens;
    const totalTokens = promptTokens + completionTokens;
    // Calculate cost
    const pricing = MODEL_REGISTRY[params.model].pricing;
    const costUsd = (promptTokens / 1000 * pricing.input) + (completionTokens / 1000 * pricing.output);
    return {
        content,
        promptTokens,
        completionTokens,
        totalTokens,
        costUsd,
    };
}
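As a usage sketch only: anthropic.d.ts above types createCompletion() against AnthropicCompletionParams, so a caller might look like the following. The model id is a placeholder, and the cost lookup assumes the id exists in MODEL_REGISTRY; otherwise the pricing access in anthropic.js would throw.

// Usage sketch, not code from the package.
// Requires ANTHROPIC_API_KEY in the environment; the model id is hypothetical.
import { createCompletion } from './services/providers/anthropic.js'; // assumed dist-relative path

const result = await createCompletion({
    model: 'claude-example-model', // placeholder; must be a key of MODEL_REGISTRY for the cost math
    messages: [
        { role: 'system', content: 'Answer tersely.' }, // folded into `system` by the filter above
        { role: 'user', content: 'What is a gate in Layer?' },
    ],
    maxTokens: 256,
    temperature: 0.2,
});

console.log(result.content, result.totalTokens, result.costUsd);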
@@ -0,0 +1,32 @@
import { LayerRequest, LayerResponse, Role, ImageDetail, ImageSize, ImageQuality, ImageStyle, VideoSize, AudioFormat, AudioMimeType, ImageMimeType, FinishReason, ToolChoice, EncodingFormat, ADAPTER_HANDLED } from '@layer-ai/sdk';
export { ADAPTER_HANDLED };
export declare abstract class ProviderAdapter {
    protected abstract provider: string;
    protected roleMappings?: Record<Role, string>;
    protected imageDetailMappings?: Record<ImageDetail, string>;
    protected toolChoiceMappings?: Record<string, string | object>;
    protected finishReasonMappings?: Record<string, FinishReason>;
    protected imageSizeMappings?: Record<ImageSize, string>;
    protected imageQualityMappings?: Record<ImageQuality, string>;
    protected imageStyleMappings?: Record<ImageStyle, string>;
    protected videoSizeMappings?: Record<VideoSize, string>;
    protected audioFormatMappings?: Record<AudioFormat, string>;
    protected audioMimeTypeMappings?: Record<AudioMimeType, string>;
    protected imageMimeTypeMappings?: Record<ImageMimeType, string>;
    protected encodingFormatMappings?: Record<EncodingFormat, string>;
    abstract call(request: LayerRequest): Promise<LayerResponse>;
    protected mapRole(role: Role): string;
    protected mapImageDetail(detail: ImageDetail): string | undefined;
    protected mapImageSize(size: ImageSize): string | undefined;
    protected mapImageQuality(quality: ImageQuality): string | undefined;
    protected mapImageStyle(style: ImageStyle): string | undefined;
    protected mapVideoSize(size: VideoSize): string | undefined;
    protected mapAudioFormat(format: AudioFormat): string | undefined;
    protected mapAudioMimeType(mimeType: AudioMimeType): string | undefined;
    protected mapImageMimeType(mimeType: ImageMimeType): string | undefined;
    protected mapEncodingFormat(format: EncodingFormat): string | undefined;
    protected mapFinishReason(providerFinishReason: string): FinishReason;
    protected mapToolChoice(choice: ToolChoice): string | object | undefined;
    protected calculateCost(model: string, promptTokens: number, completionTokens: number): number;
}
//# sourceMappingURL=base-adapter.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"base-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/base-adapter.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,YAAY,EACZ,aAAa,EACb,IAAI,EACJ,WAAW,EACX,SAAS,EACT,YAAY,EACZ,UAAU,EACV,SAAS,EACT,WAAW,EACX,aAAa,EACb,aAAa,EACb,YAAY,EACZ,UAAU,EACV,cAAc,EACd,eAAe,EAGhB,MAAM,eAAe,CAAC;AAEvB,OAAO,EAAE,eAAe,EAAE,CAAC;AAE3B,8BAAsB,eAAe;IACnC,SAAS,CAAC,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAEpC,SAAS,CAAC,YAAY,CAAC,EAAE,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;IAC9C,SAAS,CAAC,mBAAmB,CAAC,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAAC;IAC5D,SAAS,CAAC,kBAAkB,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,CAAC,CAAC;IAC/D,SAAS,CAAC,oBAAoB,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAAC;IAC9D,SAAS,CAAC,iBAAiB,CAAC,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;IACxD,SAAS,CAAC,oBAAoB,CAAC,EAAE,MAAM,CAAC,YAAY,EAAE,MAAM,CAAC,CAAC;IAC9D,SAAS,CAAC,kBAAkB,CAAC,EAAE,MAAM,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC;IAC1D,SAAS,CAAC,iBAAiB,CAAC,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;IACxD,SAAS,CAAC,mBAAmB,CAAC,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAAC;IAC5D,SAAS,CAAC,qBAAqB,CAAC,EAAE,MAAM,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;IAChE,SAAS,CAAC,qBAAqB,CAAC,EAAE,MAAM,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;IAChE,SAAS,CAAC,sBAAsB,CAAC,EAAE,MAAM,CAAC,cAAc,EAAE,MAAM,CAAC,CAAC;IAElE,QAAQ,CAAC,IAAI,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,aAAa,CAAC;IAE5D,SAAS,CAAC,OAAO,CAAC,IAAI,EAAE,IAAI,GAAG,MAAM;IAcrC,SAAS,CAAC,cAAc,CAAC,MAAM,EAAE,WAAW,GAAG,MAAM,GAAG,SAAS;IAQjE,SAAS,CAAC,YAAY,CAAC,IAAI,EAAE,SAAS,GAAG,MAAM,GAAG,SAAS;IAQ3D,SAAS,CAAC,eAAe,CAAC,OAAO,EAAE,YAAY,GAAG,MAAM,GAAG,SAAS;IAQpE,SAAS,CAAC,aAAa,CAAC,KAAK,EAAE,UAAU,GAAG,MAAM,GAAG,SAAS;IAQ9D,SAAS,CAAC,YAAY,CAAC,IAAI,EAAE,SAAS,GAAG,MAAM,GAAG,SAAS;IAQ3D,SAAS,CAAC,cAAc,CAAC,MAAM,EAAE,WAAW,GAAG,MAAM,GAAG,SAAS;IAQjE,SAAS,CAAC,gBAAgB,CAAC,QAAQ,EAAE,aAAa,GAAG,MAAM,GAAG,SAAS;IAQvE,SAAS,CAAC,gBAAgB,CAAC,QAAQ,EAAE,aAAa,GAAG,MAAM,GAAG,SAAS;IAQvE,SAAS,CAAC,iBAAiB,CAAC,MAAM,EAAE,cAAc,GAAG,MAAM,GAAG,SAAS;IAQvE,SAAS,CAAC,eAAe,CAAC,oBAAoB,EAAE,MAAM,GAAG,YAAY;IAQrE,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,GAAG,MAAM,GAAG,SAAS;IAYxE,SAAS,CAAC,aAAa,CACrB,KAAK,EAAE,MAAM,EACb,YAAY,EAAE,MAAM,EACpB,gBAAgB,EAAE,MAAM,GACvB,MAAM;CAMV"}
@@ -0,0 +1,89 @@
import { ADAPTER_HANDLED, MODEL_REGISTRY, } from '@layer-ai/sdk';
export { ADAPTER_HANDLED };
export class ProviderAdapter {
    mapRole(role) {
        if (!this.roleMappings) {
            return role;
        }
        const mapped = this.roleMappings[role];
        if (mapped === ADAPTER_HANDLED) {
            return ADAPTER_HANDLED;
        }
        return mapped || role;
    }
    mapImageDetail(detail) {
        if (!this.imageDetailMappings) {
            return undefined;
        }
        return this.imageDetailMappings[detail];
    }
    mapImageSize(size) {
        if (!this.imageSizeMappings) {
            return size;
        }
        return this.imageSizeMappings[size];
    }
    mapImageQuality(quality) {
        if (!this.imageQualityMappings) {
            return undefined;
        }
        return this.imageQualityMappings[quality];
    }
    mapImageStyle(style) {
        if (!this.imageStyleMappings) {
            return undefined;
        }
        return this.imageStyleMappings[style];
    }
    mapVideoSize(size) {
        if (!this.videoSizeMappings) {
            return size;
        }
        return this.videoSizeMappings[size];
    }
    mapAudioFormat(format) {
        if (!this.audioFormatMappings) {
            return undefined;
        }
        return this.audioFormatMappings[format];
    }
    mapAudioMimeType(mimeType) {
        if (!this.audioMimeTypeMappings) {
            return mimeType;
        }
        return this.audioMimeTypeMappings[mimeType];
    }
    mapImageMimeType(mimeType) {
        if (!this.imageMimeTypeMappings) {
            return mimeType;
        }
        return this.imageMimeTypeMappings[mimeType];
    }
    mapEncodingFormat(format) {
        if (!this.encodingFormatMappings) {
            return format;
        }
        return this.encodingFormatMappings[format];
    }
    mapFinishReason(providerFinishReason) {
        if (!this.finishReasonMappings) {
            return 'completed';
        }
        return this.finishReasonMappings[providerFinishReason] || 'completed';
    }
    mapToolChoice(choice) {
        if (typeof choice === 'object') {
            return choice;
        }
        if (!this.toolChoiceMappings) {
            return choice;
        }
        return this.toolChoiceMappings[choice];
    }
    calculateCost(model, promptTokens, completionTokens) {
        const modelInfo = MODEL_REGISTRY[model];
        return modelInfo?.pricing
            ? (promptTokens / 1000 * modelInfo.pricing.input) + (completionTokens / 1000 * modelInfo.pricing.output)
            : 0;
    }
}
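To make the pattern concrete (an illustrative sketch, not code from this package): a concrete adapter subclasses ProviderAdapter, sets its provider name and only the mapping tables it needs, and implements call(); the inherited map* helpers fall back to the raw value, undefined, or 'completed' exactly as shown above when a table is absent.

// Illustrative subclass; the package's real implementation is OpenAIAdapter below.
// The 'echo' provider, its finish-reason value, and the response fields used here are invented,
// and the final cast papers over LayerResponse fields not visible in this diff.
import { ProviderAdapter } from './base-adapter.js';
import type { LayerRequest, LayerResponse, FinishReason } from '@layer-ai/sdk';

class EchoAdapter extends ProviderAdapter {
    protected provider = 'echo'; // hypothetical provider name
    protected finishReasonMappings: Record<string, FinishReason> = {
        done: 'completed', // provider-specific value -> normalized FinishReason
    };

    async call(request: LayerRequest): Promise<LayerResponse> {
        return {
            content: 'echo',
            model: request.model,
            finishReason: this.mapFinishReason('done'),
            cost: this.calculateCost(request.model, 0, 0), // 0 when the model has no MODEL_REGISTRY pricing
        } as LayerResponse;
    }
}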
@@ -0,0 +1,18 @@
import type { Message } from '@layer-ai/sdk';
export interface GoogleCompletionParams {
    model: string;
    messages: Message[];
    temperature?: number;
    maxTokens?: number;
    topP?: number;
    systemPrompt?: string;
}
export interface ProviderResponse {
    content: string;
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    costUsd: number;
}
export declare function createCompletion(params: GoogleCompletionParams): Promise<ProviderResponse>;
//# sourceMappingURL=google.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../../../src/services/providers/google.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,OAAO,EAAkB,MAAM,eAAe,CAAC;AAY7D,MAAM,WAAW,sBAAsB;IACrC,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB;AAGD,MAAM,WAAW,gBAAgB;IAC/B,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,CAAC;IACrB,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,wBAAsB,gBAAgB,CAAC,MAAM,EAAE,sBAAsB,GAAG,OAAO,CAAC,gBAAgB,CAAC,CAiChG"}
@@ -0,0 +1,39 @@
import { GoogleGenAI } from "@google/genai";
import { MODEL_REGISTRY } from "@layer-ai/sdk";
let client = null;
function getGoogleClient() {
    if (!client) {
        client = new GoogleGenAI({ apiKey: process.env.GOOGLE_API_KEY || '' });
    }
    return client;
}
export async function createCompletion(params) {
    const contents = params.messages.map(msg => ({
        role: msg.role === 'assistant' ? 'model' : 'user',
        parts: [{ text: msg.content }],
    }));
    const response = await getGoogleClient().models.generateContent({
        model: params.model,
        contents,
        config: {
            systemInstruction: params.systemPrompt,
            temperature: params.temperature,
            maxOutputTokens: params.maxTokens,
            topP: params.topP,
        },
    });
    const content = response.text || '';
    const usageMetadata = response.usageMetadata;
    const promptTokens = usageMetadata?.promptTokenCount || 0;
    const completionTokens = usageMetadata?.candidatesTokenCount || 0;
    const totalTokens = usageMetadata?.totalTokenCount || (promptTokens + completionTokens);
    const pricing = MODEL_REGISTRY[params.model].pricing;
    const costUsd = (promptTokens / 1000 * pricing.input) + (completionTokens / 1000 * pricing.output);
    return {
        content,
        promptTokens,
        completionTokens,
        totalTokens,
        costUsd,
    };
}
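All three provider wrappers (and ProviderAdapter.calculateCost above) price a request the same way: per-1,000-token rates applied separately to prompt and completion tokens. A worked example with invented rates, since the real numbers live in MODEL_REGISTRY inside @layer-ai/sdk and are not part of this diff:

// Invented pricing, for illustration only; real values come from MODEL_REGISTRY[model].pricing.
const pricing = { input: 0.003, output: 0.015 }; // USD per 1K tokens (assumed)
const promptTokens = 1200;
const completionTokens = 300;
const costUsd = (promptTokens / 1000 * pricing.input) + (completionTokens / 1000 * pricing.output);
// = 1.2 * 0.003 + 0.3 * 0.015 = 0.0036 + 0.0045 = 0.0081 USD
console.log(costUsd.toFixed(4)); // "0.0081"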
@@ -0,0 +1,19 @@
import { ProviderAdapter } from './base-adapter.js';
import { LayerRequest, LayerResponse, Role, ImageDetail, ImageSize, ImageQuality, ImageStyle, VideoSize, AudioFormat, FinishReason } from '@layer-ai/sdk';
export declare class OpenAIAdapter extends ProviderAdapter {
    protected provider: string;
    protected roleMappings: Record<Role, string>;
    protected imageDetailMappings: Record<ImageDetail, string>;
    protected finishReasonMappings: Record<string, FinishReason>;
    protected imageSizeMappings: Record<ImageSize, string>;
    protected imageQualityMappings: Record<ImageQuality, string>;
    protected imageStyleMappings: Record<ImageStyle, string>;
    protected videoSizeMappings: Record<VideoSize, string>;
    protected audioFormatMappings: Record<AudioFormat, string>;
    call(request: LayerRequest): Promise<LayerResponse>;
    private handleChat;
    private handleImageGeneration;
    private handleEmbeddings;
    private handleTextToSpeech;
}
//# sourceMappingURL=openai-adapter.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"openai-adapter.d.ts","sourceRoot":"","sources":["../../../src/services/providers/openai-adapter.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AACpD,OAAO,EACL,YAAY,EACZ,aAAa,EACb,IAAI,EACJ,WAAW,EACX,SAAS,EACT,YAAY,EACZ,UAAU,EACV,SAAS,EACT,WAAW,EACX,YAAY,EACb,MAAM,eAAe,CAAC;AAavB,qBAAa,aAAc,SAAQ,eAAe;IAChD,SAAS,CAAC,QAAQ,SAAY;IAE9B,SAAS,CAAC,YAAY,EAAE,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,CAQ1C;IAEF,SAAS,CAAC,mBAAmB,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAIxD;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAK1D;IAEF,SAAS,CAAC,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAQpD;IAEF,SAAS,CAAC,oBAAoB,EAAE,MAAM,CAAC,YAAY,EAAE,MAAM,CAAC,CAG1D;IAEF,SAAS,CAAC,kBAAkB,EAAE,MAAM,CAAC,UAAU,EAAE,MAAM,CAAC,CAGtD;IAEF,SAAS,CAAC,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAKpD;IAEF,SAAS,CAAC,mBAAmB,EAAE,MAAM,CAAC,WAAW,EAAE,MAAM,CAAC,CAOxD;IAEI,IAAI,CAAC,OAAO,EAAE,YAAY,GAAG,OAAO,CAAC,aAAa,CAAC;YAiB3C,UAAU;YAgGV,qBAAqB;YAyBrB,gBAAgB;YA6BhB,kBAAkB;CA0BjC"}
@@ -0,0 +1,240 @@
import OpenAI from 'openai';
import { ProviderAdapter } from './base-adapter.js';
let openai = null;
function getOpenAIClient() {
    if (!openai) {
        openai = new OpenAI({
            apiKey: process.env.OPENAI_API_KEY,
        });
    }
    return openai;
}
export class OpenAIAdapter extends ProviderAdapter {
    constructor() {
        super(...arguments);
        this.provider = 'openai';
        this.roleMappings = {
            system: 'system',
            user: 'user',
            assistant: 'assistant',
            tool: 'tool',
            function: 'function',
            model: 'assistant',
            developer: 'system',
        };
        this.imageDetailMappings = {
            auto: 'auto',
            low: 'low',
            high: 'high',
        };
        this.finishReasonMappings = {
            stop: 'completed',
            length: 'length_limit',
            tool_calls: 'tool_call',
            content_filter: 'filtered',
        };
        this.imageSizeMappings = {
            '256x256': '256x256',
            '512x512': '512x512',
            '1024x1024': '1024x1024',
            '1792x1024': '1792x1024',
            '1024x1792': '1024x1792',
            '1536x1024': '1536x1024',
            '1024x1536': '1024x1536',
        };
        this.imageQualityMappings = {
            standard: 'standard',
            hd: 'hd',
        };
        this.imageStyleMappings = {
            vivid: 'vivid',
            natural: 'natural',
        };
        this.videoSizeMappings = {
            '720x1280': '720x1280',
            '1280x720': '1280x720',
            '1024x1792': '1024x1792',
            '1792x1024': '1792x1024',
        };
        this.audioFormatMappings = {
            mp3: 'mp3',
            opus: 'opus',
            aac: 'aac',
            flac: 'flac',
            wav: 'wav',
            pcm: 'pcm',
        };
    }
    async call(request) {
        switch (request.type) {
            case 'chat':
                return this.handleChat(request);
            case 'image':
                return this.handleImageGeneration(request);
            case 'embeddings':
                return this.handleEmbeddings(request);
            case 'tts':
                return this.handleTextToSpeech(request);
            case 'video':
                throw new Error('Video generation not yet supported by OpenAI');
            default:
                throw new Error(`Unknown modality: ${request.type}`);
        }
    }
    async handleChat(request) {
        const startTime = Date.now();
        const client = getOpenAIClient();
        const { data: chat, model } = request;
        const messages = [];
        if (chat.systemPrompt) {
            messages.push({ role: 'system', content: chat.systemPrompt });
        }
        for (const msg of chat.messages) {
            const role = this.mapRole(msg.role);
            if (msg.images && msg.images.length > 0) {
                const content = [];
                if (msg.content) {
                    content.push({ type: 'text', text: msg.content });
                }
                for (const image of msg.images) {
                    const imageUrl = image.url || `data:${image.mimeType || 'image/jpeg'};base64,${image.base64}`;
                    content.push({
                        type: 'image_url',
                        image_url: {
                            url: imageUrl,
                            ...(image.detail && { detail: this.mapImageDetail(image.detail) }),
                        },
                    });
                }
                messages.push({ role: role, content });
            }
            else if (msg.toolCalls) {
                messages.push({
                    role: 'assistant',
                    content: msg.content || null,
                    tool_calls: msg.toolCalls,
                });
            }
            else if (msg.toolCallId) {
                messages.push({
                    role: 'tool',
                    content: msg.content || '',
                    tool_call_id: msg.toolCallId,
                });
            }
            else {
                messages.push({
                    role,
                    content: msg.content || '',
                    ...(role === 'function' && msg.name && { name: msg.name }),
                });
            }
        }
        const openaiRequest = {
            model: model,
            messages,
            stream: false,
            ...(chat.temperature !== undefined && { temperature: chat.temperature }),
            ...(chat.maxTokens !== undefined && { max_completion_tokens: chat.maxTokens }),
            ...(chat.topP !== undefined && { top_p: chat.topP }),
            ...(chat.stopSequences !== undefined && { stop: chat.stopSequences }),
            ...(chat.frequencyPenalty !== undefined && { frequency_penalty: chat.frequencyPenalty }),
            ...(chat.presencePenalty !== undefined && { presence_penalty: chat.presencePenalty }),
            ...(chat.seed !== undefined && { seed: chat.seed }),
            ...(chat.tools && {
                tools: chat.tools,
                ...(chat.toolChoice && { tool_choice: chat.toolChoice }),
            }),
        };
        const response = await client.chat.completions.create(openaiRequest);
        const choice = response.choices[0];
        const promptTokens = response.usage?.prompt_tokens || 0;
        const completionTokens = response.usage?.completion_tokens || 0;
        const totalTokens = response.usage?.total_tokens || 0;
        const cost = this.calculateCost(model, promptTokens, completionTokens);
        return {
            content: choice.message.content || undefined,
            toolCalls: choice.message.tool_calls,
            model: response.model,
            finishReason: this.mapFinishReason(choice.finish_reason),
            rawFinishReason: choice.finish_reason,
            usage: {
                promptTokens,
                completionTokens,
                totalTokens,
            },
            cost,
            latencyMs: Date.now() - startTime,
            raw: response,
        };
    }
    async handleImageGeneration(request) {
        const startTime = Date.now();
        const client = getOpenAIClient();
        const { data: image, model } = request;
        const response = await client.images.generate({
            model: model,
            prompt: image.prompt,
            ...(image.size && { size: this.mapImageSize(image.size) }),
            ...(image.quality && { quality: this.mapImageQuality(image.quality) }),
            ...(image.count && { n: image.count }),
            ...(image.style && { style: this.mapImageStyle(image.style) }),
        });
        return {
            images: (response.data || []).map(img => ({
                url: img.url,
                revisedPrompt: img.revised_prompt,
            })),
            model: model,
            latencyMs: Date.now() - startTime,
            raw: response,
        };
    }
    async handleEmbeddings(request) {
        const startTime = Date.now();
        const client = getOpenAIClient();
        const { data: embedding, model } = request;
        const response = await client.embeddings.create({
            model: model,
            input: embedding.input,
            ...(embedding.dimensions && { dimensions: embedding.dimensions }),
            ...(embedding.encodingFormat && { encoding_format: embedding.encodingFormat }),
        });
        const promptTokens = response.usage?.prompt_tokens || 0;
        const cost = this.calculateCost(model, promptTokens, 0);
        return {
            embeddings: response.data.map(d => d.embedding),
            model: response.model,
            usage: {
                promptTokens,
                completionTokens: 0,
                totalTokens: promptTokens,
            },
            cost,
            latencyMs: Date.now() - startTime,
            raw: response,
        };
    }
    async handleTextToSpeech(request) {
        const startTime = Date.now();
        const client = getOpenAIClient();
        const { data: tts, model } = request;
        const response = await client.audio.speech.create({
            model: model,
            input: tts.input,
            voice: (tts.voice || 'alloy'),
            ...(tts.speed !== undefined && { speed: tts.speed }),
            ...(tts.responseFormat && { response_format: this.mapAudioFormat(tts.responseFormat) }),
        });
        const buffer = Buffer.from(await response.arrayBuffer());
        const base64 = buffer.toString('base64');
        return {
            audio: {
                base64,
                format: tts.responseFormat || 'mp3',
            },
            model: model,
            latencyMs: Date.now() - startTime,
        };
    }
}
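A call-site sketch for the adapter: the fields below mirror what handleChat() reads (request.type, request.model, and request.data with systemPrompt, messages, temperature, maxTokens); the full LayerRequest type lives in @layer-ai/sdk and is not shown in this diff, hence the cast.

// Sketch only: drives OpenAIAdapter.call() with a chat-modality request.
// Requires OPENAI_API_KEY; the model id is a placeholder, and `as any` hides
// whatever LayerRequest fields the SDK requires beyond those read above.
import { OpenAIAdapter } from './services/providers/openai-adapter.js'; // assumed dist-relative path

const adapter = new OpenAIAdapter();
const result = await adapter.call({
    type: 'chat',
    model: 'gpt-4o-mini', // placeholder model id
    data: {
        systemPrompt: 'Answer in one sentence.',
        messages: [{ role: 'user', content: 'What does @layer-ai/core do?' }],
        temperature: 0.3,
        maxTokens: 128,
    },
} as any);

console.log(result.content, result.usage, result.cost);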
@@ -0,0 +1,17 @@
import type { Message } from '@layer-ai/sdk';
export interface OpenAICompletionParams {
    model: string;
    messages: Message[];
    temperature?: number;
    maxTokens?: number;
    topP?: number;
}
export interface ProviderResponse {
    content: string;
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    costUsd: number;
}
export declare function createCompletion(params: OpenAICompletionParams): Promise<ProviderResponse>;
//# sourceMappingURL=openai.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../../src/services/providers/openai.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,OAAO,EAAkB,MAAM,eAAe,CAAC;AAe7D,MAAM,WAAW,sBAAsB;IACrC,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,IAAI,CAAC,EAAE,MAAM,CAAC;CACf;AAED,MAAM,WAAW,gBAAgB;IAC/B,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,CAAC;IACrB,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,wBAAsB,gBAAgB,CAAC,MAAM,EAAE,sBAAsB,GAAG,OAAO,CAAC,gBAAgB,CAAC,CAmChG"}