claude-self-reflect 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config/claude-desktop-config.json +12 -0
- package/dist/embeddings-gemini.d.ts +76 -0
- package/dist/embeddings-gemini.d.ts.map +1 -0
- package/dist/embeddings-gemini.js +158 -0
- package/dist/embeddings-gemini.js.map +1 -0
- package/dist/embeddings.d.ts +67 -0
- package/dist/embeddings.d.ts.map +1 -0
- package/dist/embeddings.js +252 -0
- package/dist/embeddings.js.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +307 -0
- package/dist/index.js.map +1 -0
- package/dist/project-isolation.d.ts +29 -0
- package/dist/project-isolation.d.ts.map +1 -0
- package/dist/project-isolation.js +78 -0
- package/dist/project-isolation.js.map +1 -0
- package/package.json +66 -0
- package/src/embeddings-gemini.ts +176 -0
- package/src/embeddings.ts +296 -0
- package/src/index.ts +366 -0
- package/src/project-isolation.ts +93 -0
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
import fetch from 'node-fetch';
|
|
2
|
+
import { EmbeddingService } from './embeddings';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Google Gemini embedding service implementation
|
|
6
|
+
* Supports task-specific optimization and variable dimensions
|
|
7
|
+
*/
|
|
8
|
+
export class GeminiEmbeddingService implements EmbeddingService {
|
|
9
|
+
private apiKey: string;
|
|
10
|
+
private model: string;
|
|
11
|
+
private dimensions: number;
|
|
12
|
+
private taskType: GeminiTaskType;
|
|
13
|
+
|
|
14
|
+
constructor(
|
|
15
|
+
apiKey: string,
|
|
16
|
+
model: string = 'gemini-embedding-001',
|
|
17
|
+
dimensions: number = 768, // Can be 768, 1536, or 3072
|
|
18
|
+
taskType: GeminiTaskType = GeminiTaskType.RETRIEVAL_QUERY
|
|
19
|
+
) {
|
|
20
|
+
this.apiKey = apiKey;
|
|
21
|
+
this.model = model;
|
|
22
|
+
this.dimensions = dimensions;
|
|
23
|
+
this.taskType = taskType;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
async generateEmbedding(text: string): Promise<number[]> {
|
|
27
|
+
try {
|
|
28
|
+
const response = await fetch(
|
|
29
|
+
`https://generativelanguage.googleapis.com/v1beta/models/${this.model}:embedContent?key=${this.apiKey}`,
|
|
30
|
+
{
|
|
31
|
+
method: 'POST',
|
|
32
|
+
headers: {
|
|
33
|
+
'Content-Type': 'application/json',
|
|
34
|
+
},
|
|
35
|
+
body: JSON.stringify({
|
|
36
|
+
model: `models/${this.model}`,
|
|
37
|
+
content: {
|
|
38
|
+
parts: [{ text }]
|
|
39
|
+
},
|
|
40
|
+
taskType: this.taskType,
|
|
41
|
+
outputDimensionality: this.dimensions
|
|
42
|
+
}),
|
|
43
|
+
}
|
|
44
|
+
);
|
|
45
|
+
|
|
46
|
+
if (!response.ok) {
|
|
47
|
+
const errorBody = await response.text();
|
|
48
|
+
console.error('Gemini API error details:', errorBody);
|
|
49
|
+
throw new Error(`Gemini API error: ${response.status} ${response.statusText}`);
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
const data = await response.json() as any;
|
|
53
|
+
const embedding = data.embedding.values;
|
|
54
|
+
|
|
55
|
+
// Normalize embeddings for dimensions other than 3072
|
|
56
|
+
if (this.dimensions !== 3072) {
|
|
57
|
+
return this.normalizeEmbedding(embedding);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
return embedding;
|
|
61
|
+
} catch (error) {
|
|
62
|
+
throw new Error(`Failed to generate Gemini embedding: ${error}`);
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
/**
|
|
67
|
+
* Normalize embedding vector
|
|
68
|
+
*/
|
|
69
|
+
private normalizeEmbedding(embedding: number[]): number[] {
|
|
70
|
+
const magnitude = Math.sqrt(embedding.reduce((sum, val) => sum + val * val, 0));
|
|
71
|
+
return embedding.map(val => val / magnitude);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
getDimensions(): number {
|
|
75
|
+
return this.dimensions;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
getModelName(): string {
|
|
79
|
+
return `gemini/${this.model}`;
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Gemini task types for optimized embeddings
|
|
85
|
+
*/
|
|
86
|
+
export enum GeminiTaskType {
  // Scoring how similar two texts are to each other.
  SEMANTIC_SIMILARITY = 'SEMANTIC_SIMILARITY',
  // Embeddings used as features for a downstream classifier.
  CLASSIFICATION = 'CLASSIFICATION',
  // Grouping related texts together.
  CLUSTERING = 'CLUSTERING',
  // Corpus-side embeddings: use when indexing documents for search.
  RETRIEVAL_DOCUMENT = 'RETRIEVAL_DOCUMENT',
  // Query-side embeddings: use when embedding a search query.
  RETRIEVAL_QUERY = 'RETRIEVAL_QUERY',
  // Query-side embeddings tuned for code search.
  CODE_RETRIEVAL_QUERY = 'CODE_RETRIEVAL_QUERY',
  // Retrieval tuned for question-answering workloads.
  QUESTION_ANSWERING = 'QUESTION_ANSWERING',
  // Retrieval tuned for fact-checking / verification workloads.
  FACT_VERIFICATION = 'FACT_VERIFICATION'
}
|
|
96
|
+
|
|
97
|
+
/**
|
|
98
|
+
* Gemini vs Voyage comparison utility
|
|
99
|
+
*/
|
|
100
|
+
export class EmbeddingComparison {
|
|
101
|
+
/**
|
|
102
|
+
* Compare Gemini and Voyage for different use cases
|
|
103
|
+
*/
|
|
104
|
+
static getComparison() {
|
|
105
|
+
return {
|
|
106
|
+
gemini: {
|
|
107
|
+
model: 'gemini-embedding-001',
|
|
108
|
+
dimensions: [768, 1536, 3072],
|
|
109
|
+
taskTypes: Object.values(GeminiTaskType),
|
|
110
|
+
advantages: [
|
|
111
|
+
'Task-specific optimization',
|
|
112
|
+
'Variable dimensions (MRL technique)',
|
|
113
|
+
'Normalized embeddings at 3072',
|
|
114
|
+
'Google infrastructure and reliability',
|
|
115
|
+
'Integrated with Google ecosystem'
|
|
116
|
+
],
|
|
117
|
+
limitations: [
|
|
118
|
+
'No published accuracy benchmarks vs Voyage',
|
|
119
|
+
'Token limits not clearly documented',
|
|
120
|
+
'Potentially higher cost at scale',
|
|
121
|
+
'Requires Google Cloud account'
|
|
122
|
+
],
|
|
123
|
+
bestFor: [
|
|
124
|
+
'Applications already using Google Cloud',
|
|
125
|
+
'Need for task-specific optimization',
|
|
126
|
+
'Variable dimension requirements',
|
|
127
|
+
'Question-answering systems'
|
|
128
|
+
]
|
|
129
|
+
},
|
|
130
|
+
voyage: {
|
|
131
|
+
model: 'voyage-3.5-lite',
|
|
132
|
+
dimensions: 1024,
|
|
133
|
+
accuracy: '66.1%',
|
|
134
|
+
tokenLimit: 32000,
|
|
135
|
+
advantages: [
|
|
136
|
+
'Proven high accuracy (66.1% vs OpenAI 39.2%)',
|
|
137
|
+
'Large token limit (32k)',
|
|
138
|
+
'Cost-effective ($0.02/M tokens)',
|
|
139
|
+
'200M free tokens',
|
|
140
|
+
'Optimized for retrieval tasks'
|
|
141
|
+
],
|
|
142
|
+
limitations: [
|
|
143
|
+
'Fixed dimensions (1024)',
|
|
144
|
+
'No task-specific optimization',
|
|
145
|
+
'Single model variant for lite tier',
|
|
146
|
+
'Less ecosystem integration'
|
|
147
|
+
],
|
|
148
|
+
bestFor: [
|
|
149
|
+
'Long document processing',
|
|
150
|
+
'Cost-sensitive applications',
|
|
151
|
+
'General-purpose retrieval',
|
|
152
|
+
'High accuracy requirements'
|
|
153
|
+
]
|
|
154
|
+
}
|
|
155
|
+
};
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
/**
|
|
159
|
+
* Estimate costs for project
|
|
160
|
+
*/
|
|
161
|
+
static estimateCosts(totalTokens: number) {
|
|
162
|
+
return {
|
|
163
|
+
voyage: {
|
|
164
|
+
freeTokens: 200_000_000,
|
|
165
|
+
costPerMillion: 0.02,
|
|
166
|
+
estimatedCost: Math.max(0, (totalTokens - 200_000_000) / 1_000_000 * 0.02)
|
|
167
|
+
},
|
|
168
|
+
gemini: {
|
|
169
|
+
// Gemini pricing varies by region and usage
|
|
170
|
+
note: 'Gemini pricing varies by region. Check Google Cloud pricing for your region.',
|
|
171
|
+
approximateCostPerMillion: 0.025, // Approximate
|
|
172
|
+
estimatedCost: totalTokens / 1_000_000 * 0.025
|
|
173
|
+
}
|
|
174
|
+
};
|
|
175
|
+
}
|
|
176
|
+
}
|
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
import { spawn } from 'child_process';
|
|
2
|
+
import fetch from 'node-fetch';
|
|
3
|
+
|
|
4
|
+
export interface EmbeddingService {
  /** Generate an embedding vector for the given text. */
  generateEmbedding(text: string): Promise<number[]>;
  /** Number of dimensions in the vectors this service produces. */
  getDimensions(): number;
  /** Provider-qualified model identifier (e.g. 'openai/text-embedding-3-small'). */
  getModelName(): string;
}
|
|
9
|
+
|
|
10
|
+
/**
|
|
11
|
+
* OpenAI embedding service - primary choice for production
|
|
12
|
+
*/
|
|
13
|
+
export class OpenAIEmbeddingService implements EmbeddingService {
|
|
14
|
+
private apiKey: string;
|
|
15
|
+
private model: string;
|
|
16
|
+
private dimensions: number;
|
|
17
|
+
|
|
18
|
+
constructor(apiKey: string, model: string = 'text-embedding-3-small') {
|
|
19
|
+
this.apiKey = apiKey;
|
|
20
|
+
this.model = model;
|
|
21
|
+
this.dimensions = model === 'text-embedding-3-small' ? 1536 : 3072;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
async generateEmbedding(text: string): Promise<number[]> {
|
|
25
|
+
try {
|
|
26
|
+
const response = await fetch('https://api.openai.com/v1/embeddings', {
|
|
27
|
+
method: 'POST',
|
|
28
|
+
headers: {
|
|
29
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
30
|
+
'Content-Type': 'application/json',
|
|
31
|
+
},
|
|
32
|
+
body: JSON.stringify({
|
|
33
|
+
input: text,
|
|
34
|
+
model: this.model,
|
|
35
|
+
}),
|
|
36
|
+
});
|
|
37
|
+
|
|
38
|
+
if (!response.ok) {
|
|
39
|
+
const errorBody = await response.text();
|
|
40
|
+
console.error('OpenAI API error details:', errorBody);
|
|
41
|
+
throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const data = await response.json() as any;
|
|
45
|
+
return data.data[0].embedding;
|
|
46
|
+
} catch (error) {
|
|
47
|
+
throw new Error(`Failed to generate OpenAI embedding: ${error}`);
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
getDimensions(): number {
|
|
52
|
+
return this.dimensions;
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
getModelName(): string {
|
|
56
|
+
return `openai/${this.model}`;
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Voyage AI embedding service - high accuracy option
|
|
62
|
+
*/
|
|
63
|
+
export class VoyageEmbeddingService implements EmbeddingService {
|
|
64
|
+
private apiKey: string;
|
|
65
|
+
private model: string;
|
|
66
|
+
private dimensions: number;
|
|
67
|
+
|
|
68
|
+
constructor(apiKey: string, model: string = 'voyage-3.5-lite') {
|
|
69
|
+
this.apiKey = apiKey;
|
|
70
|
+
this.model = model;
|
|
71
|
+
this.dimensions = 1024; // Voyage default dimensions
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
async generateEmbedding(text: string): Promise<number[]> {
|
|
75
|
+
try {
|
|
76
|
+
const response = await fetch('https://api.voyageai.com/v1/embeddings', {
|
|
77
|
+
method: 'POST',
|
|
78
|
+
headers: {
|
|
79
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
80
|
+
'Content-Type': 'application/json',
|
|
81
|
+
},
|
|
82
|
+
body: JSON.stringify({
|
|
83
|
+
input: [text],
|
|
84
|
+
model: this.model,
|
|
85
|
+
input_type: 'query', // Use query type for search
|
|
86
|
+
}),
|
|
87
|
+
});
|
|
88
|
+
|
|
89
|
+
if (!response.ok) {
|
|
90
|
+
const errorBody = await response.text();
|
|
91
|
+
console.error('Voyage API error details:', errorBody);
|
|
92
|
+
throw new Error(`Voyage API error: ${response.status} ${response.statusText}`);
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
const data = await response.json() as any;
|
|
96
|
+
return data.data[0].embedding;
|
|
97
|
+
} catch (error) {
|
|
98
|
+
throw new Error(`Failed to generate Voyage embedding: ${error}`);
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
getDimensions(): number {
|
|
103
|
+
return this.dimensions;
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
getModelName(): string {
|
|
107
|
+
return `voyage/${this.model}`;
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
/**
|
|
112
|
+
* Local sentence-transformers embedding service - fallback option
|
|
113
|
+
*/
|
|
114
|
+
export class LocalEmbeddingService implements EmbeddingService {
|
|
115
|
+
private pythonScript: string;
|
|
116
|
+
private modelName: string;
|
|
117
|
+
private dimensions: number;
|
|
118
|
+
|
|
119
|
+
constructor(modelName: string = 'sentence-transformers/all-MiniLM-L6-v2') {
|
|
120
|
+
this.modelName = modelName;
|
|
121
|
+
this.dimensions = 384; // all-MiniLM-L6-v2 dimensions
|
|
122
|
+
|
|
123
|
+
this.pythonScript = `
|
|
124
|
+
import sys
|
|
125
|
+
import json
|
|
126
|
+
from sentence_transformers import SentenceTransformer
|
|
127
|
+
|
|
128
|
+
model = SentenceTransformer('${modelName}')
|
|
129
|
+
text = sys.stdin.read()
|
|
130
|
+
embedding = model.encode(text).tolist()
|
|
131
|
+
print(json.dumps(embedding))
|
|
132
|
+
`;
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
async generateEmbedding(text: string): Promise<number[]> {
|
|
136
|
+
return new Promise((resolve, reject) => {
|
|
137
|
+
const python = spawn('python3', ['-c', this.pythonScript]);
|
|
138
|
+
|
|
139
|
+
let output = '';
|
|
140
|
+
let error = '';
|
|
141
|
+
|
|
142
|
+
python.stdout.on('data', (data) => {
|
|
143
|
+
output += data.toString();
|
|
144
|
+
});
|
|
145
|
+
|
|
146
|
+
python.stderr.on('data', (data) => {
|
|
147
|
+
error += data.toString();
|
|
148
|
+
});
|
|
149
|
+
|
|
150
|
+
python.on('close', (code) => {
|
|
151
|
+
if (code !== 0) {
|
|
152
|
+
reject(new Error(`Python process exited with code ${code}: ${error}`));
|
|
153
|
+
return;
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
try {
|
|
157
|
+
const embedding = JSON.parse(output.trim());
|
|
158
|
+
resolve(embedding);
|
|
159
|
+
} catch (e) {
|
|
160
|
+
reject(new Error(`Failed to parse embedding: ${e}`));
|
|
161
|
+
}
|
|
162
|
+
});
|
|
163
|
+
|
|
164
|
+
python.stdin.write(text);
|
|
165
|
+
python.stdin.end();
|
|
166
|
+
});
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
getDimensions(): number {
|
|
170
|
+
return this.dimensions;
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
getModelName(): string {
|
|
174
|
+
return this.modelName;
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
/**
|
|
179
|
+
* Mock embedding service for development/testing
|
|
180
|
+
*/
|
|
181
|
+
export class MockEmbeddingService implements EmbeddingService {
|
|
182
|
+
private dimensions: number = 384;
|
|
183
|
+
|
|
184
|
+
async generateEmbedding(text: string): Promise<number[]> {
|
|
185
|
+
// Generate a deterministic fake embedding based on text
|
|
186
|
+
const embedding = new Array(this.dimensions).fill(0);
|
|
187
|
+
for (let i = 0; i < Math.min(text.length, this.dimensions); i++) {
|
|
188
|
+
embedding[i] = (text.charCodeAt(i) % 256) / 256;
|
|
189
|
+
}
|
|
190
|
+
return embedding;
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
getDimensions(): number {
|
|
194
|
+
return this.dimensions;
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
getModelName(): string {
|
|
198
|
+
return 'mock/deterministic';
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
* Factory to create appropriate embedding service with fallback chain
|
|
204
|
+
*/
|
|
205
|
+
export async function createEmbeddingService(config?: {
|
|
206
|
+
openaiApiKey?: string;
|
|
207
|
+
voyageApiKey?: string;
|
|
208
|
+
preferLocal?: boolean;
|
|
209
|
+
modelName?: string;
|
|
210
|
+
}): Promise<EmbeddingService> {
|
|
211
|
+
// 1. Try Voyage AI if API key is provided (highest accuracy)
|
|
212
|
+
if (config?.voyageApiKey && !config.preferLocal) {
|
|
213
|
+
try {
|
|
214
|
+
console.error(`Attempting to create Voyage AI service with key: ${config.voyageApiKey.substring(0, 10)}...`);
|
|
215
|
+
const service = new VoyageEmbeddingService(config.voyageApiKey, config.modelName);
|
|
216
|
+
// Test the API key with a simple request
|
|
217
|
+
await service.generateEmbedding('test');
|
|
218
|
+
console.error('Using Voyage AI embedding service');
|
|
219
|
+
return service;
|
|
220
|
+
} catch (error) {
|
|
221
|
+
console.error('Voyage AI embedding service failed, falling back to OpenAI:', error);
|
|
222
|
+
}
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// 2. Try OpenAI if API key is provided and not preferring local
|
|
226
|
+
if (config?.openaiApiKey && !config.preferLocal) {
|
|
227
|
+
try {
|
|
228
|
+
const service = new OpenAIEmbeddingService(config.openaiApiKey, config.modelName);
|
|
229
|
+
// Test the API key with a simple request
|
|
230
|
+
await service.generateEmbedding('test');
|
|
231
|
+
console.error('Using OpenAI embedding service');
|
|
232
|
+
return service;
|
|
233
|
+
} catch (error) {
|
|
234
|
+
console.error('OpenAI embedding service failed, falling back to local:', error);
|
|
235
|
+
}
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
// 2. Try local sentence-transformers
|
|
239
|
+
try {
|
|
240
|
+
const checkScript = `
|
|
241
|
+
import sentence_transformers
|
|
242
|
+
print("OK")
|
|
243
|
+
`;
|
|
244
|
+
|
|
245
|
+
const python = spawn('python3', ['-c', checkScript]);
|
|
246
|
+
|
|
247
|
+
const hasLocalModel = await new Promise<boolean>((resolve) => {
|
|
248
|
+
python.on('close', (code) => {
|
|
249
|
+
resolve(code === 0);
|
|
250
|
+
});
|
|
251
|
+
});
|
|
252
|
+
|
|
253
|
+
if (hasLocalModel) {
|
|
254
|
+
console.error('Using local sentence-transformers embedding service');
|
|
255
|
+
return new LocalEmbeddingService(config?.modelName);
|
|
256
|
+
}
|
|
257
|
+
} catch (error) {
|
|
258
|
+
console.error('Local embedding service check failed:', error);
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
// 3. Fall back to mock embeddings
|
|
262
|
+
console.error('Warning: No embedding service available, using mock embeddings');
|
|
263
|
+
console.error('For production use, provide OPENAI_API_KEY or install sentence-transformers');
|
|
264
|
+
return new MockEmbeddingService();
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
/**
|
|
268
|
+
* Configuration helper to detect which embedding model was used for existing data
|
|
269
|
+
*/
|
|
270
|
+
export async function detectEmbeddingModel(qdrantUrl: string, collectionName: string): Promise<{
|
|
271
|
+
model: string;
|
|
272
|
+
dimensions: number;
|
|
273
|
+
}> {
|
|
274
|
+
try {
|
|
275
|
+
const response = await fetch(`${qdrantUrl}/collections/${collectionName}`);
|
|
276
|
+
const data = await response.json() as any;
|
|
277
|
+
|
|
278
|
+
const dimensions = data.result?.config?.params?.vectors?.size || 384;
|
|
279
|
+
|
|
280
|
+
// Infer model from dimensions
|
|
281
|
+
if (dimensions === 1024) {
|
|
282
|
+
return { model: 'voyage-3.5-lite', dimensions };
|
|
283
|
+
} else if (dimensions === 1536) {
|
|
284
|
+
return { model: 'text-embedding-3-small', dimensions };
|
|
285
|
+
} else if (dimensions === 3072) {
|
|
286
|
+
return { model: 'text-embedding-3-large', dimensions };
|
|
287
|
+
} else if (dimensions === 384) {
|
|
288
|
+
return { model: 'sentence-transformers/all-MiniLM-L6-v2', dimensions };
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
return { model: 'unknown', dimensions };
|
|
292
|
+
} catch (error) {
|
|
293
|
+
console.error('Failed to detect embedding model:', error);
|
|
294
|
+
return { model: 'sentence-transformers/all-MiniLM-L6-v2', dimensions: 384 };
|
|
295
|
+
}
|
|
296
|
+
}
|