kite-agent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kite/__init__.py +46 -0
- kite/ab_testing.py +384 -0
- kite/agent.py +556 -0
- kite/agents/__init__.py +3 -0
- kite/agents/plan_execute.py +191 -0
- kite/agents/react_agent.py +509 -0
- kite/agents/reflective_agent.py +90 -0
- kite/agents/rewoo.py +119 -0
- kite/agents/tot.py +151 -0
- kite/conversation.py +125 -0
- kite/core.py +974 -0
- kite/data_loaders.py +111 -0
- kite/embedding_providers.py +372 -0
- kite/llm_providers.py +1278 -0
- kite/memory/__init__.py +6 -0
- kite/memory/advanced_rag.py +333 -0
- kite/memory/graph_rag.py +719 -0
- kite/memory/session_memory.py +423 -0
- kite/memory/vector_memory.py +579 -0
- kite/monitoring.py +611 -0
- kite/observers.py +107 -0
- kite/optimization/__init__.py +9 -0
- kite/optimization/resource_router.py +80 -0
- kite/persistence.py +42 -0
- kite/pipeline/__init__.py +5 -0
- kite/pipeline/deterministic_pipeline.py +323 -0
- kite/pipeline/reactive_pipeline.py +171 -0
- kite/pipeline_manager.py +15 -0
- kite/routing/__init__.py +6 -0
- kite/routing/aggregator_router.py +325 -0
- kite/routing/llm_router.py +149 -0
- kite/routing/semantic_router.py +228 -0
- kite/safety/__init__.py +6 -0
- kite/safety/circuit_breaker.py +360 -0
- kite/safety/guardrails.py +82 -0
- kite/safety/idempotency_manager.py +304 -0
- kite/safety/kill_switch.py +75 -0
- kite/tool.py +183 -0
- kite/tool_registry.py +87 -0
- kite/tools/__init__.py +21 -0
- kite/tools/code_execution.py +53 -0
- kite/tools/contrib/__init__.py +19 -0
- kite/tools/contrib/calculator.py +26 -0
- kite/tools/contrib/datetime_utils.py +20 -0
- kite/tools/contrib/linkedin.py +428 -0
- kite/tools/contrib/web_search.py +30 -0
- kite/tools/mcp/__init__.py +31 -0
- kite/tools/mcp/database_mcp.py +267 -0
- kite/tools/mcp/gdrive_mcp_server.py +503 -0
- kite/tools/mcp/gmail_mcp_server.py +601 -0
- kite/tools/mcp/postgres_mcp_server.py +490 -0
- kite/tools/mcp/slack_mcp_server.py +538 -0
- kite/tools/mcp/stripe_mcp_server.py +219 -0
- kite/tools/search.py +90 -0
- kite/tools/system_tools.py +54 -0
- kite/tools_manager.py +27 -0
- kite_agent-0.1.0.dist-info/METADATA +621 -0
- kite_agent-0.1.0.dist-info/RECORD +61 -0
- kite_agent-0.1.0.dist-info/WHEEL +5 -0
- kite_agent-0.1.0.dist-info/licenses/LICENSE +21 -0
- kite_agent-0.1.0.dist-info/top_level.txt +1 -0
kite/__init__.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Kite Framework - Production-Ready Agentic AI
|
|
3
|
+
|
|
4
|
+
A lightweight, safety-first framework for building intelligent AI agents
|
|
5
|
+
with enterprise-grade reliability.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
__version__ = "0.1.0"
|
|
9
|
+
__author__ = "Thien Nguyen"
|
|
10
|
+
__license__ = "MIT"
|
|
11
|
+
|
|
12
|
+
from .core import Kite
|
|
13
|
+
from .agent import Agent
|
|
14
|
+
from .tool import Tool
|
|
15
|
+
|
|
16
|
+
# Safety components
|
|
17
|
+
from .safety.circuit_breaker import CircuitBreaker, CircuitBreakerConfig, CircuitState
|
|
18
|
+
from .safety.idempotency_manager import IdempotencyManager
|
|
19
|
+
from .safety.kill_switch import KillSwitch
|
|
20
|
+
|
|
21
|
+
# Agent patterns
|
|
22
|
+
from .agents.react_agent import ReActAgent
|
|
23
|
+
from .agents.plan_execute import PlanExecuteAgent
|
|
24
|
+
from .agents.rewoo import ReWOOAgent
|
|
25
|
+
from .agents.tot import TreeOfThoughtsAgent
|
|
26
|
+
|
|
27
|
+
__all__ = [
|
|
28
|
+
# Core
|
|
29
|
+
"Kite",
|
|
30
|
+
"Agent",
|
|
31
|
+
"Tool",
|
|
32
|
+
"__version__",
|
|
33
|
+
|
|
34
|
+
# Safety
|
|
35
|
+
"CircuitBreaker",
|
|
36
|
+
"CircuitBreakerConfig",
|
|
37
|
+
"CircuitState",
|
|
38
|
+
"IdempotencyManager",
|
|
39
|
+
"KillSwitch",
|
|
40
|
+
|
|
41
|
+
# Agents
|
|
42
|
+
"ReActAgent",
|
|
43
|
+
"PlanExecuteAgent",
|
|
44
|
+
"ReWOOAgent",
|
|
45
|
+
"TreeOfThoughtsAgent",
|
|
46
|
+
]
|
kite/ab_testing.py
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
1
|
+
"""
|
|
2
|
+
A/B Testing Framework for Agents
|
|
3
|
+
Test different prompts, models, and configurations.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import time
|
|
7
|
+
import random
|
|
8
|
+
import hashlib
|
|
9
|
+
from typing import Dict, List, Any, Optional, Callable
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from collections import defaultdict
|
|
12
|
+
import json
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class Variant:
    """A/B test variant configuration.

    Carries the variant's traffic weight and arbitrary config payload,
    plus running counters that the owning experiment updates.
    """
    name: str
    weight: float  # 0-1, must sum to 1.0 across an experiment's variants
    config: Dict[str, Any]

    # Running metrics (mutated by Experiment.record_*).
    impressions: int = 0
    conversions: int = 0
    total_latency: float = 0
    errors: int = 0

    def conversion_rate(self) -> float:
        """Fraction of impressions that converted (0 when no impressions)."""
        if not self.impressions:
            return 0
        return self.conversions / self.impressions

    def avg_latency(self) -> float:
        """Mean recorded latency per impression (0 when no impressions)."""
        if not self.impressions:
            return 0
        return self.total_latency / self.impressions

    def error_rate(self) -> float:
        """Fraction of impressions that errored (0 when no impressions)."""
        if not self.impressions:
            return 0
        return self.errors / self.impressions
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@dataclass
class Experiment:
    """A/B test experiment.

    Holds a set of weighted variants, deterministically assigns users to
    variants via hashing, and accumulates per-variant metrics.

    Raises:
        ValueError: on construction, if variant weights do not sum to ~1.0.
    """
    name: str
    description: str
    variants: List[Variant]
    start_time: float = field(default_factory=time.time)
    end_time: Optional[float] = None
    active: bool = True

    def __post_init__(self):
        # Validate weights sum to 1.0 (small tolerance for float error).
        total_weight = sum(v.weight for v in self.variants)
        if not (0.99 <= total_weight <= 1.01):
            raise ValueError(f"Variant weights must sum to 1.0, got {total_weight}")

    def _variant_by_name(self, variant_name: str) -> Optional[Variant]:
        """Return the variant with the given name, or None if unknown."""
        for v in self.variants:
            if v.name == variant_name:
                return v
        return None

    def get_variant(self, user_id: str) -> Variant:
        """
        Get variant for user (consistent assignment).
        Uses hash-based assignment for consistency: the same user always
        lands in the same variant for this experiment.
        """
        # MD5 is used only as a stable bucketing hash, not for security.
        hash_value = int(hashlib.md5(
            f"{self.name}:{user_id}".encode()
        ).hexdigest(), 16)

        # Normalize to 0-1 (10000 buckets => 0.01% assignment resolution).
        normalized = (hash_value % 10000) / 10000.0

        # Select variant based on cumulative weights.
        cumulative = 0.0
        for variant in self.variants:
            cumulative += variant.weight
            if normalized <= cumulative:
                return variant

        return self.variants[-1]  # Fallback for float rounding at the top edge

    def record_impression(self, variant_name: str):
        """Record an impression; unknown variant names are ignored."""
        v = self._variant_by_name(variant_name)
        if v is not None:
            v.impressions += 1

    def record_conversion(self, variant_name: str):
        """Record a conversion; unknown variant names are ignored."""
        v = self._variant_by_name(variant_name)
        if v is not None:
            v.conversions += 1

    def record_latency(self, variant_name: str, latency: float):
        """Record a latency sample; unknown variant names are ignored."""
        v = self._variant_by_name(variant_name)
        if v is not None:
            v.total_latency += latency

    def record_error(self, variant_name: str):
        """Record an error; unknown variant names are ignored."""
        v = self._variant_by_name(variant_name)
        if v is not None:
            v.errors += 1

    def get_results(self) -> Dict:
        """Get experiment results as a plain dict (JSON-serializable)."""
        results = {
            'name': self.name,
            'description': self.description,
            'duration': (self.end_time or time.time()) - self.start_time,
            'active': self.active,
            'variants': []
        }

        for v in self.variants:
            results['variants'].append({
                'name': v.name,
                'weight': v.weight,
                'impressions': v.impressions,
                'conversions': v.conversions,
                'conversion_rate': v.conversion_rate(),
                'avg_latency': v.avg_latency(),
                'error_rate': v.error_rate()
            })

        # Declare a winner only once every variant has enough data.
        if all(v.impressions >= 100 for v in self.variants):
            winner = max(self.variants, key=lambda v: v.conversion_rate())
            results['winner'] = winner.name
            results['confidence'] = self._calculate_confidence(winner)

        return results

    def _calculate_confidence(self, winner: Variant) -> float:
        """Simple confidence heuristic in [0, 1].

        Simplified - in production use proper statistical tests.
        """
        if winner.impressions < 100:
            return 0.0

        # Guard: a single-variant experiment has nothing to compare against
        # (the original code divided by len(variants) - 1 == 0 here).
        if len(self.variants) < 2:
            return 0.0

        # Identity comparison: field-equality (dataclass ==) could wrongly
        # exclude a distinct variant whose metrics happen to match the winner.
        others_avg = sum(
            v.conversion_rate()
            for v in self.variants
            if v is not winner
        ) / (len(self.variants) - 1)

        improvement = (winner.conversion_rate() - others_avg) / others_avg if others_avg > 0 else 0

        return min(improvement * 10, 1.0)  # Simplified
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
class ABTestManager:
    """
    Manage multiple A/B tests.

    Example:
        manager = ABTestManager()

        # Create experiment
        exp = manager.create_experiment(
            name="prompt_test",
            description="Test different system prompts",
            variants=[
                Variant("control", 0.5, {"prompt": "You are helpful"}),
                Variant("friendly", 0.5, {"prompt": "You are very friendly"})
            ]
        )

        # Get variant for user
        variant = manager.get_variant("prompt_test", user_id)

        # Use variant config
        response = llm.chat(system_prompt=variant.config['prompt'])

        # Record metrics
        manager.record_impression("prompt_test", variant.name)
        manager.record_conversion("prompt_test", variant.name)
    """

    def __init__(self):
        # All experiments, keyed by their unique name.
        self.experiments: Dict[str, Experiment] = {}

    def create_experiment(self, name: str, description: str,
                          variants: List[Variant]) -> Experiment:
        """Register and return a new experiment; names must be unique."""
        if name in self.experiments:
            raise ValueError(f"Experiment {name} already exists")

        experiment = Experiment(name, description, variants)
        self.experiments[name] = experiment
        return experiment

    def get_experiment(self, name: str) -> Optional[Experiment]:
        """Look up an experiment by name (None if it does not exist)."""
        return self.experiments.get(name)

    def get_variant(self, experiment_name: str, user_id: str) -> Optional[Variant]:
        """Assign the user to a variant of an *active* experiment, else None."""
        exp = self.experiments.get(experiment_name)
        if exp is None or not exp.active:
            return None
        return exp.get_variant(user_id)

    def _forward(self, experiment_name: str, method: str, *args):
        """Invoke *method* on the named experiment; unknown names are no-ops."""
        exp = self.experiments.get(experiment_name)
        if exp is not None:
            getattr(exp, method)(*args)

    def record_impression(self, experiment_name: str, variant_name: str):
        """Record an impression for a variant."""
        self._forward(experiment_name, "record_impression", variant_name)

    def record_conversion(self, experiment_name: str, variant_name: str):
        """Record a conversion for a variant."""
        self._forward(experiment_name, "record_conversion", variant_name)

    def record_latency(self, experiment_name: str, variant_name: str,
                       latency: float):
        """Record a latency sample for a variant."""
        self._forward(experiment_name, "record_latency", variant_name, latency)

    def record_error(self, experiment_name: str, variant_name: str):
        """Record an error for a variant."""
        self._forward(experiment_name, "record_error", variant_name)

    def stop_experiment(self, name: str):
        """Deactivate an experiment and stamp its end time."""
        exp = self.experiments.get(name)
        if exp is None:
            return
        exp.active = False
        exp.end_time = time.time()

    def get_results(self, name: str) -> Optional[Dict]:
        """Results for the named experiment, or None if it does not exist."""
        exp = self.experiments.get(name)
        if exp is None:
            return None
        return exp.get_results()

    def list_experiments(self) -> List[str]:
        """Names of all registered experiments."""
        return list(self.experiments)
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
class PromptVersionManager:
    """
    Manage prompt versions with A/B testing.

    Example:
        manager = PromptVersionManager()

        # Register prompts
        manager.register("system_prompt", "v1", "You are helpful")
        manager.register("system_prompt", "v2", "You are very helpful")

        # Create A/B test
        manager.create_test(
            "system_prompt",
            versions=["v1", "v2"],
            weights=[0.5, 0.5]
        )

        # Get prompt for user
        prompt = manager.get_prompt("system_prompt", user_id)
    """

    def __init__(self):
        # prompt_name -> {version -> content}. Dict insertion order preserves
        # registration order, so the last registered version is "latest".
        self.prompts: Dict[str, Dict[str, str]] = defaultdict(dict)
        self.ab_manager = ABTestManager()

    def register(self, prompt_name: str, version: str, content: str):
        """Register (or overwrite) a prompt version."""
        self.prompts[prompt_name][version] = content

    def get_versions(self, prompt_name: str) -> List[str]:
        """Get all versions of a prompt, in registration order."""
        # .get() avoids creating a phantom empty entry in the defaultdict
        # when asked about a prompt that was never registered.
        return list(self.prompts.get(prompt_name, {}))

    def create_test(self, prompt_name: str, versions: List[str],
                    weights: List[float], description: str = ""):
        """Create A/B test for prompt versions.

        Raises:
            ValueError: if versions and weights differ in length, a version
                is not registered, or a test for this prompt already exists.
        """
        # zip() would silently drop extra entries on a length mismatch.
        if len(versions) != len(weights):
            raise ValueError(
                f"versions and weights must have the same length "
                f"({len(versions)} vs {len(weights)})"
            )

        registered = self.prompts.get(prompt_name, {})
        variants = []
        for version, weight in zip(versions, weights):
            if version not in registered:
                raise ValueError(f"Version {version} not found")

            variants.append(Variant(
                name=version,
                weight=weight,
                config={'prompt': registered[version]}
            ))

        self.ab_manager.create_experiment(
            name=f"prompt_{prompt_name}",
            description=description or f"Test {prompt_name} versions",
            variants=variants
        )

    def get_prompt(self, prompt_name: str, user_id: str) -> str:
        """Get prompt for user (with A/B test if active).

        Records an impression for the assigned variant. Falls back to the
        most recently registered version, or "" if none are registered.
        """
        exp_name = f"prompt_{prompt_name}"
        variant = self.ab_manager.get_variant(exp_name, user_id)

        if variant:
            self.ab_manager.record_impression(exp_name, variant.name)
            return variant.config['prompt']

        # Fallback to latest registered version.
        versions = self.get_versions(prompt_name)
        if versions:
            return self.prompts[prompt_name][versions[-1]]

        return ""

    def record_success(self, prompt_name: str, user_id: str):
        """Record successful use of the prompt shown to this user."""
        exp_name = f"prompt_{prompt_name}"
        # Assignment is hash-based, so this resolves to the same variant
        # the user was served in get_prompt().
        variant = self.ab_manager.get_variant(exp_name, user_id)
        if variant:
            self.ab_manager.record_conversion(exp_name, variant.name)

    def get_results(self, prompt_name: str) -> Optional[Dict]:
        """Get A/B test results for a prompt (None if no test exists)."""
        return self.ab_manager.get_results(f"prompt_{prompt_name}")
|
|
327
|
+
|
|
328
|
+
|
|
329
|
+
if __name__ == "__main__":
    # Demo: exercise both managers with simulated traffic.
    print("A/B Testing Framework Example\n")

    # --- Example 1: basic experiment comparing two model configs ---
    print("1. Basic A/B Test")
    manager = ABTestManager()

    manager.create_experiment(
        name="model_test",
        description="Test GPT-4 vs Claude",
        variants=[
            Variant("gpt4", 0.5, {"model": "gpt-4"}),
            Variant("claude", 0.5, {"model": "claude-3"}),
        ],
    )

    for idx in range(200):
        uid = f"user_{idx}"
        chosen = manager.get_variant("model_test", uid)
        manager.record_impression("model_test", chosen.name)
        # ~30% of simulated users convert.
        if random.random() < 0.3:
            manager.record_conversion("model_test", chosen.name)

    results = manager.get_results("model_test")
    first, second = results['variants'][0], results['variants'][1]
    print(f" Results: {first['conversion_rate']:.1%} vs {second['conversion_rate']:.1%}")

    # --- Example 2: prompt version A/B test ---
    print("\n2. Prompt Version Test")
    prompt_mgr = PromptVersionManager()

    prompt_mgr.register("greeting", "v1", "Hello, how can I help?")
    prompt_mgr.register("greeting", "v2", "Hi there! What can I do for you today?")

    prompt_mgr.create_test(
        "greeting",
        versions=["v1", "v2"],
        weights=[0.5, 0.5],
    )

    for idx in range(100):
        uid = f"user_{idx}"
        _ = prompt_mgr.get_prompt("greeting", uid)
        # ~40% of simulated uses count as a success.
        if random.random() < 0.4:
            prompt_mgr.record_success("greeting", uid)

    results = prompt_mgr.get_results("greeting")
    print(f" Prompt test results: {len(results['variants'])} variants tested")

    print("\n[OK] A/B testing framework working")
|