@radaros/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +887 -0
- package/dist/index.js +3462 -0
- package/package.json +64 -0
- package/src/agent/agent.ts +314 -0
- package/src/agent/llm-loop.ts +263 -0
- package/src/agent/run-context.ts +35 -0
- package/src/agent/types.ts +77 -0
- package/src/events/event-bus.ts +45 -0
- package/src/events/types.ts +16 -0
- package/src/guardrails/types.ts +5 -0
- package/src/hooks/types.ts +6 -0
- package/src/index.ts +111 -0
- package/src/knowledge/knowledge-base.ts +146 -0
- package/src/logger/logger.ts +232 -0
- package/src/memory/memory.ts +87 -0
- package/src/memory/types.ts +13 -0
- package/src/models/provider.ts +22 -0
- package/src/models/providers/anthropic.ts +330 -0
- package/src/models/providers/google.ts +361 -0
- package/src/models/providers/ollama.ts +211 -0
- package/src/models/providers/openai.ts +323 -0
- package/src/models/registry.ts +90 -0
- package/src/models/types.ts +112 -0
- package/src/session/session-manager.ts +75 -0
- package/src/session/types.ts +10 -0
- package/src/storage/driver.ts +10 -0
- package/src/storage/in-memory.ts +44 -0
- package/src/storage/mongodb.ts +70 -0
- package/src/storage/postgres.ts +81 -0
- package/src/storage/sqlite.ts +81 -0
- package/src/team/modes.ts +1 -0
- package/src/team/team.ts +323 -0
- package/src/team/types.ts +26 -0
- package/src/tools/define-tool.ts +20 -0
- package/src/tools/tool-executor.ts +131 -0
- package/src/tools/types.ts +27 -0
- package/src/vector/base.ts +44 -0
- package/src/vector/embeddings/google.ts +64 -0
- package/src/vector/embeddings/openai.ts +66 -0
- package/src/vector/in-memory.ts +115 -0
- package/src/vector/mongodb.ts +241 -0
- package/src/vector/pgvector.ts +169 -0
- package/src/vector/qdrant.ts +203 -0
- package/src/vector/types.ts +55 -0
- package/src/workflow/step-runner.ts +303 -0
- package/src/workflow/types.ts +55 -0
- package/src/workflow/workflow.ts +68 -0
- package/tsconfig.json +8 -0
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import { InMemoryStorage } from "../storage/in-memory.js";
|
|
2
|
+
import type { StorageDriver } from "../storage/driver.js";
|
|
3
|
+
import { getTextContent, type ChatMessage } from "../models/types.js";
|
|
4
|
+
import type { MemoryConfig, MemoryEntry } from "./types.js";
|
|
5
|
+
|
|
6
|
+
const SHORT_TERM_NS = "memory:short";
|
|
7
|
+
const LONG_TERM_NS = "memory:long";
|
|
8
|
+
|
|
9
|
+
export class Memory {
|
|
10
|
+
private storage: StorageDriver;
|
|
11
|
+
private maxShortTermMessages: number;
|
|
12
|
+
private enableLongTerm: boolean;
|
|
13
|
+
|
|
14
|
+
constructor(config?: MemoryConfig) {
|
|
15
|
+
this.storage = config?.storage ?? new InMemoryStorage();
|
|
16
|
+
this.maxShortTermMessages = config?.maxShortTermMessages ?? 50;
|
|
17
|
+
this.enableLongTerm = config?.enableLongTerm ?? false;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
async addMessages(
|
|
21
|
+
sessionId: string,
|
|
22
|
+
messages: ChatMessage[]
|
|
23
|
+
): Promise<void> {
|
|
24
|
+
const existing =
|
|
25
|
+
(await this.storage.get<ChatMessage[]>(SHORT_TERM_NS, sessionId)) ?? [];
|
|
26
|
+
const updated = [...existing, ...messages];
|
|
27
|
+
|
|
28
|
+
if (updated.length > this.maxShortTermMessages) {
|
|
29
|
+
const overflow = updated.splice(
|
|
30
|
+
0,
|
|
31
|
+
updated.length - this.maxShortTermMessages
|
|
32
|
+
);
|
|
33
|
+
|
|
34
|
+
if (this.enableLongTerm && overflow.length > 0) {
|
|
35
|
+
await this.summarizeAndStore(sessionId, overflow);
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
await this.storage.set(SHORT_TERM_NS, sessionId, updated);
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
async getMessages(sessionId: string): Promise<ChatMessage[]> {
|
|
43
|
+
return (
|
|
44
|
+
(await this.storage.get<ChatMessage[]>(SHORT_TERM_NS, sessionId)) ?? []
|
|
45
|
+
);
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
async getSummaries(sessionId: string): Promise<string[]> {
|
|
49
|
+
if (!this.enableLongTerm) return [];
|
|
50
|
+
|
|
51
|
+
const entries = await this.storage.list<MemoryEntry>(
|
|
52
|
+
LONG_TERM_NS,
|
|
53
|
+
sessionId
|
|
54
|
+
);
|
|
55
|
+
return entries.map((e) => e.value.summary);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
async getContextString(sessionId: string): Promise<string> {
|
|
59
|
+
const summaries = await this.getSummaries(sessionId);
|
|
60
|
+
if (summaries.length === 0) return "";
|
|
61
|
+
return `Previous context:\n${summaries.join("\n")}`;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
private async summarizeAndStore(
|
|
65
|
+
sessionId: string,
|
|
66
|
+
messages: ChatMessage[]
|
|
67
|
+
): Promise<void> {
|
|
68
|
+
const textParts = messages
|
|
69
|
+
.filter((m) => m.content)
|
|
70
|
+
.map((m) => `${m.role}: ${getTextContent(m.content)}`);
|
|
71
|
+
|
|
72
|
+
if (textParts.length === 0) return;
|
|
73
|
+
|
|
74
|
+
const summary = textParts.join(" | ").slice(0, 500);
|
|
75
|
+
const entry: MemoryEntry = {
|
|
76
|
+
key: `${sessionId}:${Date.now()}`,
|
|
77
|
+
summary,
|
|
78
|
+
createdAt: new Date(),
|
|
79
|
+
};
|
|
80
|
+
|
|
81
|
+
await this.storage.set(LONG_TERM_NS, entry.key, entry);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
async clear(sessionId: string): Promise<void> {
|
|
85
|
+
await this.storage.delete(SHORT_TERM_NS, sessionId);
|
|
86
|
+
}
|
|
87
|
+
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { StorageDriver } from "../storage/driver.js";
|
|
2
|
+
|
|
3
|
+
/** Options accepted by the `Memory` constructor. */
export interface MemoryConfig {
  /** Backing store; an in-memory driver is used when omitted. */
  storage?: StorageDriver;
  /** Max messages kept verbatim per session before overflow (default 50). */
  maxShortTermMessages?: number;
  /** When true, overflowed messages are condensed into long-term summaries. */
  enableLongTerm?: boolean;
}
|
|
8
|
+
|
|
9
|
+
/** A single long-term memory record persisted by `Memory`. */
export interface MemoryEntry {
  /** Storage key, built as `${sessionId}:${timestamp}`. */
  key: string;
  /** Condensed text of the overflowed messages (capped at 500 chars). */
  summary: string;
  /** When the summary was created. */
  createdAt: Date;
}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
ChatMessage,
|
|
3
|
+
ModelConfig,
|
|
4
|
+
ModelResponse,
|
|
5
|
+
StreamChunk,
|
|
6
|
+
ToolDefinition,
|
|
7
|
+
} from "./types.js";
|
|
8
|
+
|
|
9
|
+
/**
 * Common contract implemented by every LLM backend in this package
 * (see src/models/providers: anthropic, google, ollama, openai).
 * Implementations translate the shared ChatMessage format into
 * provider-specific requests.
 */
export interface ModelProvider {
  /** Backend identifier, e.g. "anthropic". */
  readonly providerId: string;
  /** Concrete model name sent to the backend. */
  readonly modelId: string;

  /** Runs a single, non-streaming completion over `messages`. */
  generate(
    messages: ChatMessage[],
    options?: ModelConfig & { tools?: ToolDefinition[] }
  ): Promise<ModelResponse>;

  /** Streams a completion as incremental chunks (text / tool-call deltas). */
  stream(
    messages: ChatMessage[],
    options?: ModelConfig & { tools?: ToolDefinition[] }
  ): AsyncGenerator<StreamChunk>;
}
|
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
import { createRequire } from "node:module";
|
|
2
|
+
import type { ModelProvider } from "../provider.js";
|
|
3
|
+
import {
|
|
4
|
+
getTextContent,
|
|
5
|
+
isMultiModal,
|
|
6
|
+
type ChatMessage,
|
|
7
|
+
type ContentPart,
|
|
8
|
+
type ModelConfig,
|
|
9
|
+
type ModelResponse,
|
|
10
|
+
type StreamChunk,
|
|
11
|
+
type ToolDefinition,
|
|
12
|
+
type TokenUsage,
|
|
13
|
+
type ToolCall,
|
|
14
|
+
} from "../types.js";
|
|
15
|
+
|
|
16
|
+
// CJS require shim so the optional @anthropic-ai/sdk dependency can be
// loaded lazily from this ESM module.
const _require = createRequire(import.meta.url);

/** Construction options for AnthropicProvider. */
interface AnthropicConfig {
  /** API key; the ANTHROPIC_API_KEY env var is used when omitted. */
  apiKey?: string;
}
|
|
21
|
+
|
|
22
|
+
export class AnthropicProvider implements ModelProvider {
|
|
23
|
+
readonly providerId = "anthropic";
|
|
24
|
+
readonly modelId: string;
|
|
25
|
+
private client: any;
|
|
26
|
+
private AnthropicCtor: any;
|
|
27
|
+
private clientCache = new Map<string, any>();
|
|
28
|
+
|
|
29
|
+
constructor(modelId: string, config?: AnthropicConfig) {
|
|
30
|
+
this.modelId = modelId;
|
|
31
|
+
try {
|
|
32
|
+
const mod = _require("@anthropic-ai/sdk");
|
|
33
|
+
this.AnthropicCtor = mod.default ?? mod;
|
|
34
|
+
const key = config?.apiKey ?? process.env.ANTHROPIC_API_KEY;
|
|
35
|
+
if (key) {
|
|
36
|
+
this.client = new this.AnthropicCtor({ apiKey: key });
|
|
37
|
+
}
|
|
38
|
+
} catch {
|
|
39
|
+
throw new Error(
|
|
40
|
+
"@anthropic-ai/sdk is required for AnthropicProvider. Install it: npm install @anthropic-ai/sdk"
|
|
41
|
+
);
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
private getClient(apiKey?: string): any {
|
|
46
|
+
if (apiKey) {
|
|
47
|
+
let cached = this.clientCache.get(apiKey);
|
|
48
|
+
if (!cached) {
|
|
49
|
+
cached = new this.AnthropicCtor({ apiKey });
|
|
50
|
+
this.clientCache.set(apiKey, cached);
|
|
51
|
+
}
|
|
52
|
+
return cached;
|
|
53
|
+
}
|
|
54
|
+
if (this.client) return this.client;
|
|
55
|
+
const envKey = process.env.ANTHROPIC_API_KEY;
|
|
56
|
+
if (envKey) {
|
|
57
|
+
this.client = new this.AnthropicCtor({ apiKey: envKey });
|
|
58
|
+
return this.client;
|
|
59
|
+
}
|
|
60
|
+
throw new Error("No Anthropic API key provided. Pass it via the x-anthropic-api-key header, apiKey in request body, or set ANTHROPIC_API_KEY env var.");
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
async generate(
|
|
64
|
+
messages: ChatMessage[],
|
|
65
|
+
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
66
|
+
): Promise<ModelResponse> {
|
|
67
|
+
const { systemMsg, anthropicMessages } =
|
|
68
|
+
this.toAnthropicMessages(messages);
|
|
69
|
+
|
|
70
|
+
const params: Record<string, unknown> = {
|
|
71
|
+
model: this.modelId,
|
|
72
|
+
messages: anthropicMessages,
|
|
73
|
+
max_tokens: options?.maxTokens ?? 4096,
|
|
74
|
+
};
|
|
75
|
+
|
|
76
|
+
if (systemMsg) params.system = systemMsg;
|
|
77
|
+
if (options?.temperature !== undefined)
|
|
78
|
+
params.temperature = options.temperature;
|
|
79
|
+
if (options?.topP !== undefined) params.top_p = options.topP;
|
|
80
|
+
if (options?.stop) params.stop_sequences = options.stop;
|
|
81
|
+
if (options?.tools?.length) {
|
|
82
|
+
params.tools = this.toAnthropicTools(options.tools);
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
const client = this.getClient(options?.apiKey);
|
|
86
|
+
const response = await client.messages.create(params);
|
|
87
|
+
return this.normalizeResponse(response);
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
async *stream(
|
|
91
|
+
messages: ChatMessage[],
|
|
92
|
+
options?: ModelConfig & { tools?: ToolDefinition[] }
|
|
93
|
+
): AsyncGenerator<StreamChunk> {
|
|
94
|
+
const { systemMsg, anthropicMessages } =
|
|
95
|
+
this.toAnthropicMessages(messages);
|
|
96
|
+
|
|
97
|
+
const params: Record<string, unknown> = {
|
|
98
|
+
model: this.modelId,
|
|
99
|
+
messages: anthropicMessages,
|
|
100
|
+
max_tokens: options?.maxTokens ?? 4096,
|
|
101
|
+
stream: true,
|
|
102
|
+
};
|
|
103
|
+
|
|
104
|
+
if (systemMsg) params.system = systemMsg;
|
|
105
|
+
if (options?.temperature !== undefined)
|
|
106
|
+
params.temperature = options.temperature;
|
|
107
|
+
if (options?.topP !== undefined) params.top_p = options.topP;
|
|
108
|
+
if (options?.stop) params.stop_sequences = options.stop;
|
|
109
|
+
if (options?.tools?.length) {
|
|
110
|
+
params.tools = this.toAnthropicTools(options.tools);
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
const client = this.getClient(options?.apiKey);
|
|
114
|
+
const stream = await client.messages.create(params);
|
|
115
|
+
|
|
116
|
+
let currentToolId = "";
|
|
117
|
+
|
|
118
|
+
for await (const event of stream) {
|
|
119
|
+
switch (event.type) {
|
|
120
|
+
case "content_block_start": {
|
|
121
|
+
if (event.content_block?.type === "tool_use") {
|
|
122
|
+
currentToolId = event.content_block.id;
|
|
123
|
+
yield {
|
|
124
|
+
type: "tool_call_start",
|
|
125
|
+
toolCall: {
|
|
126
|
+
id: event.content_block.id,
|
|
127
|
+
name: event.content_block.name,
|
|
128
|
+
},
|
|
129
|
+
};
|
|
130
|
+
}
|
|
131
|
+
break;
|
|
132
|
+
}
|
|
133
|
+
case "content_block_delta": {
|
|
134
|
+
if (event.delta?.type === "text_delta") {
|
|
135
|
+
yield { type: "text", text: event.delta.text };
|
|
136
|
+
} else if (event.delta?.type === "input_json_delta") {
|
|
137
|
+
yield {
|
|
138
|
+
type: "tool_call_delta",
|
|
139
|
+
toolCallId: currentToolId,
|
|
140
|
+
argumentsDelta: event.delta.partial_json,
|
|
141
|
+
};
|
|
142
|
+
}
|
|
143
|
+
break;
|
|
144
|
+
}
|
|
145
|
+
case "content_block_stop": {
|
|
146
|
+
if (currentToolId) {
|
|
147
|
+
yield { type: "tool_call_end", toolCallId: currentToolId };
|
|
148
|
+
currentToolId = "";
|
|
149
|
+
}
|
|
150
|
+
break;
|
|
151
|
+
}
|
|
152
|
+
case "message_delta": {
|
|
153
|
+
const usage: TokenUsage | undefined = event.usage
|
|
154
|
+
? {
|
|
155
|
+
promptTokens: 0,
|
|
156
|
+
completionTokens: event.usage.output_tokens ?? 0,
|
|
157
|
+
totalTokens: event.usage.output_tokens ?? 0,
|
|
158
|
+
}
|
|
159
|
+
: undefined;
|
|
160
|
+
|
|
161
|
+
let finishReason = event.delta?.stop_reason ?? "stop";
|
|
162
|
+
if (finishReason === "tool_use") finishReason = "tool_calls";
|
|
163
|
+
if (finishReason === "end_turn") finishReason = "stop";
|
|
164
|
+
|
|
165
|
+
yield { type: "finish", finishReason, usage };
|
|
166
|
+
break;
|
|
167
|
+
}
|
|
168
|
+
case "message_start": {
|
|
169
|
+
if (event.message?.usage) {
|
|
170
|
+
// Input tokens available at start
|
|
171
|
+
}
|
|
172
|
+
break;
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
private toAnthropicMessages(messages: ChatMessage[]): {
|
|
179
|
+
systemMsg: string | undefined;
|
|
180
|
+
anthropicMessages: unknown[];
|
|
181
|
+
} {
|
|
182
|
+
let systemMsg: string | undefined;
|
|
183
|
+
const anthropicMessages: unknown[] = [];
|
|
184
|
+
|
|
185
|
+
for (const msg of messages) {
|
|
186
|
+
if (msg.role === "system") {
|
|
187
|
+
systemMsg = getTextContent(msg.content) || undefined;
|
|
188
|
+
continue;
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
if (msg.role === "user") {
|
|
192
|
+
if (isMultiModal(msg.content)) {
|
|
193
|
+
anthropicMessages.push({
|
|
194
|
+
role: "user",
|
|
195
|
+
content: msg.content.map((p) => this.partToAnthropic(p)),
|
|
196
|
+
});
|
|
197
|
+
} else {
|
|
198
|
+
anthropicMessages.push({
|
|
199
|
+
role: "user",
|
|
200
|
+
content: [{ type: "text", text: msg.content ?? "" }],
|
|
201
|
+
});
|
|
202
|
+
}
|
|
203
|
+
continue;
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
if (msg.role === "assistant") {
|
|
207
|
+
const content: unknown[] = [];
|
|
208
|
+
if (msg.content) {
|
|
209
|
+
content.push({ type: "text", text: msg.content });
|
|
210
|
+
}
|
|
211
|
+
if (msg.toolCalls) {
|
|
212
|
+
for (const tc of msg.toolCalls) {
|
|
213
|
+
content.push({
|
|
214
|
+
type: "tool_use",
|
|
215
|
+
id: tc.id,
|
|
216
|
+
name: tc.name,
|
|
217
|
+
input: tc.arguments,
|
|
218
|
+
});
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
anthropicMessages.push({
|
|
222
|
+
role: "assistant",
|
|
223
|
+
content: content.length > 0 ? content : [{ type: "text", text: "" }],
|
|
224
|
+
});
|
|
225
|
+
continue;
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
if (msg.role === "tool") {
|
|
229
|
+
anthropicMessages.push({
|
|
230
|
+
role: "user",
|
|
231
|
+
content: [
|
|
232
|
+
{
|
|
233
|
+
type: "tool_result",
|
|
234
|
+
tool_use_id: msg.toolCallId,
|
|
235
|
+
content: msg.content ?? "",
|
|
236
|
+
},
|
|
237
|
+
],
|
|
238
|
+
});
|
|
239
|
+
continue;
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
return { systemMsg, anthropicMessages };
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
private partToAnthropic(part: ContentPart): unknown {
|
|
247
|
+
switch (part.type) {
|
|
248
|
+
case "text":
|
|
249
|
+
return { type: "text", text: part.text };
|
|
250
|
+
case "image": {
|
|
251
|
+
const isUrl = part.data.startsWith("http://") || part.data.startsWith("https://");
|
|
252
|
+
if (isUrl) {
|
|
253
|
+
return { type: "image", source: { type: "url", url: part.data } };
|
|
254
|
+
}
|
|
255
|
+
return {
|
|
256
|
+
type: "image",
|
|
257
|
+
source: {
|
|
258
|
+
type: "base64",
|
|
259
|
+
media_type: part.mimeType ?? "image/png",
|
|
260
|
+
data: part.data,
|
|
261
|
+
},
|
|
262
|
+
};
|
|
263
|
+
}
|
|
264
|
+
case "audio":
|
|
265
|
+
return {
|
|
266
|
+
type: "text",
|
|
267
|
+
text: `[Audio content: ${part.mimeType ?? "audio"}]`,
|
|
268
|
+
};
|
|
269
|
+
case "file":
|
|
270
|
+
if (part.mimeType === "application/pdf") {
|
|
271
|
+
return {
|
|
272
|
+
type: "document",
|
|
273
|
+
source: { type: "base64", media_type: "application/pdf", data: part.data },
|
|
274
|
+
};
|
|
275
|
+
}
|
|
276
|
+
return {
|
|
277
|
+
type: "text",
|
|
278
|
+
text: `[File: ${part.filename ?? "attachment"} (${part.mimeType})]`,
|
|
279
|
+
};
|
|
280
|
+
}
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
private toAnthropicTools(tools: ToolDefinition[]): unknown[] {
|
|
284
|
+
return tools.map((t) => ({
|
|
285
|
+
name: t.name,
|
|
286
|
+
description: t.description,
|
|
287
|
+
input_schema: t.parameters,
|
|
288
|
+
}));
|
|
289
|
+
}
|
|
290
|
+
|
|
291
|
+
private normalizeResponse(response: any): ModelResponse {
|
|
292
|
+
const toolCalls: ToolCall[] = [];
|
|
293
|
+
let textContent = "";
|
|
294
|
+
|
|
295
|
+
for (const block of response.content ?? []) {
|
|
296
|
+
if (block.type === "text") {
|
|
297
|
+
textContent += block.text;
|
|
298
|
+
} else if (block.type === "tool_use") {
|
|
299
|
+
toolCalls.push({
|
|
300
|
+
id: block.id,
|
|
301
|
+
name: block.name,
|
|
302
|
+
arguments: block.input ?? {},
|
|
303
|
+
});
|
|
304
|
+
}
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
const usage: TokenUsage = {
|
|
308
|
+
promptTokens: response.usage?.input_tokens ?? 0,
|
|
309
|
+
completionTokens: response.usage?.output_tokens ?? 0,
|
|
310
|
+
totalTokens:
|
|
311
|
+
(response.usage?.input_tokens ?? 0) +
|
|
312
|
+
(response.usage?.output_tokens ?? 0),
|
|
313
|
+
};
|
|
314
|
+
|
|
315
|
+
let finishReason: ModelResponse["finishReason"] = "stop";
|
|
316
|
+
if (response.stop_reason === "tool_use") finishReason = "tool_calls";
|
|
317
|
+
else if (response.stop_reason === "max_tokens") finishReason = "length";
|
|
318
|
+
|
|
319
|
+
return {
|
|
320
|
+
message: {
|
|
321
|
+
role: "assistant",
|
|
322
|
+
content: textContent || null,
|
|
323
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
324
|
+
},
|
|
325
|
+
usage,
|
|
326
|
+
finishReason,
|
|
327
|
+
raw: response,
|
|
328
|
+
};
|
|
329
|
+
}
|
|
330
|
+
}
|