cite-agent 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cite-agent has been flagged for review. Consult the registry's advisory page for the specific findings before installing.
- cite_agent/__init__.py +1 -1
- cite_agent/agent_backend_only.py +30 -4
- cite_agent/cli.py +24 -26
- cite_agent/cli_conversational.py +294 -0
- cite_agent/enhanced_ai_agent.py +2776 -118
- cite_agent/streaming_ui.py +252 -0
- {cite_agent-1.0.3.dist-info → cite_agent-1.0.5.dist-info}/METADATA +4 -3
- cite_agent-1.0.5.dist-info/RECORD +50 -0
- {cite_agent-1.0.3.dist-info → cite_agent-1.0.5.dist-info}/top_level.txt +1 -0
- src/__init__.py +1 -0
- src/services/__init__.py +132 -0
- src/services/auth_service/__init__.py +3 -0
- src/services/auth_service/auth_manager.py +33 -0
- src/services/graph/__init__.py +1 -0
- src/services/graph/knowledge_graph.py +194 -0
- src/services/llm_service/__init__.py +5 -0
- src/services/llm_service/llm_manager.py +495 -0
- src/services/paper_service/__init__.py +5 -0
- src/services/paper_service/openalex.py +231 -0
- src/services/performance_service/__init__.py +1 -0
- src/services/performance_service/rust_performance.py +395 -0
- src/services/research_service/__init__.py +23 -0
- src/services/research_service/chatbot.py +2056 -0
- src/services/research_service/citation_manager.py +436 -0
- src/services/research_service/context_manager.py +1441 -0
- src/services/research_service/conversation_manager.py +597 -0
- src/services/research_service/critical_paper_detector.py +577 -0
- src/services/research_service/enhanced_research.py +121 -0
- src/services/research_service/enhanced_synthesizer.py +375 -0
- src/services/research_service/query_generator.py +777 -0
- src/services/research_service/synthesizer.py +1273 -0
- src/services/search_service/__init__.py +5 -0
- src/services/search_service/indexer.py +186 -0
- src/services/search_service/search_engine.py +342 -0
- src/services/simple_enhanced_main.py +287 -0
- cite_agent/__distribution__.py +0 -7
- cite_agent-1.0.3.dist-info/RECORD +0 -23
- {cite_agent-1.0.3.dist-info → cite_agent-1.0.5.dist-info}/WHEEL +0 -0
- {cite_agent-1.0.3.dist-info → cite_agent-1.0.5.dist-info}/entry_points.txt +0 -0
- {cite_agent-1.0.3.dist-info → cite_agent-1.0.5.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Streaming Chat UI - Cursor/Claude Style Interface
|
|
4
|
+
Minimal, clean, conversational interface for data analysis assistant
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import sys
|
|
8
|
+
import time
|
|
9
|
+
import asyncio
|
|
10
|
+
from typing import Optional, AsyncGenerator
|
|
11
|
+
from rich.console import Console
|
|
12
|
+
from rich.markdown import Markdown
|
|
13
|
+
from rich.text import Text
|
|
14
|
+
from rich.live import Live
|
|
15
|
+
from rich.spinner import Spinner
|
|
16
|
+
|
|
17
|
+
console = Console()
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class StreamingChatUI:
    """
    Clean, minimal chat interface matching Cursor/Claude aesthetics

    - Simple header (just app name)
    - "You:" / "Agent:" conversation labels
    - Streaming character-by-character output
    - Transient action indicators
    - Markdown rendering for rich text

    NOTE(review): the module also creates a global ``console``; this class
    keeps its own ``Console`` instance and never uses the module-level one.
    """

    def __init__(self, app_name: str = "Nocturnal Archive", working_dir: Optional[str] = None):
        # app_name: title printed by show_header().
        # working_dir: optional path echoed under the header; informational only.
        self.app_name = app_name
        self.working_dir = working_dir
        self.console = Console()
        self.typing_speed = 0.015  # seconds per character (~60 chars/sec)

    def show_header(self):
        """Display minimal header on startup"""
        self.console.print(f"\n[bold cyan]{self.app_name}[/bold cyan]")
        if self.working_dir:
            self.console.print(f"[dim]Connected to: {self.working_dir}[/dim]")
        self.console.print("─" * 70)
        self.console.print()

    def show_user_message(self, message: str):
        """Display user message with 'You:' prefix"""
        self.console.print(f"[bold]You:[/bold] {message}")
        self.console.print()

    async def stream_agent_response(
        self,
        content_generator: AsyncGenerator[str, None],
        show_markdown: bool = True
    ):
        """
        Stream agent response character-by-character

        Args:
            content_generator: Async generator yielding text chunks
            show_markdown: Whether to render as markdown (default True).
                NOTE(review): currently unused — text is printed verbatim;
                markdown rendering only happens via stream_markdown_response().

        Returns:
            The full accumulated response text.
        """
        # No prefix for agent - just stream naturally
        buffer = ""

        async for chunk in content_generator:
            buffer += chunk
            # Stream character by character for natural feel.
            # Each char awaits typing_speed, so long responses deliberately
            # take time to "type out".
            for char in chunk:
                self.console.print(char, end="", style="white")
                await asyncio.sleep(self.typing_speed)

        self.console.print()  # Newline after response
        self.console.print()  # Extra space for readability

        return buffer

    async def stream_markdown_response(self, markdown_text: str):
        """
        Stream a markdown response with proper formatting
        Used for final rendering after streaming is complete
        (renders the whole text at once via Rich, not char-by-char).
        """
        # Render markdown with Rich
        md = Markdown(markdown_text)
        self.console.print(md)
        self.console.print()

    def show_action_indicator(self, action: str) -> Live:
        """
        Show a transient action indicator (e.g., [reading file...])
        Returns Live object that should be stopped when action completes

        Usage:
            indicator = ui.show_action_indicator("analyzing data")
            # ... do work ...
            indicator.stop()
        """
        spinner = Spinner("dots", text=f"[dim]{action}[/dim]")
        # transient=True: the spinner line disappears once the Live is stopped.
        live = Live(spinner, console=self.console, transient=True)
        live.start()
        return live

    def show_error(self, error_message: str):
        """Display error message"""
        self.console.print(f"[red]Error:[/red] {error_message}")
        self.console.print()

    def show_info(self, message: str):
        """Display info message"""
        self.console.print(f"[dim]{message}[/dim]")
        self.console.print()

    def show_rate_limit_message(
        self,
        limit_type: str = "Archive API",
        remaining_capabilities: Optional[list] = None
    ):
        """
        Show soft degradation message when rate limited

        Args:
            limit_type: What service is limited (e.g., "Archive API")
            remaining_capabilities: List of what's still available
        """
        self.console.print(
            f"\n[yellow]I've reached the daily limit for {limit_type} queries.[/yellow]\n"
        )

        if remaining_capabilities:
            self.console.print("[bold]However, I can still assist you with:[/bold]")
            for capability in remaining_capabilities:
                self.console.print(f" • {capability}")
            self.console.print()

        self.console.print(
            "[dim]For unlimited access, consider upgrading to Pro.[/dim]\n"
        )

    def get_user_input(self, prompt: str = "You: ") -> str:
        """Get user input with custom prompt.

        Blocks on stdin; exits the entire process on Ctrl-C / EOF.
        """
        try:
            user_input = self.console.input(f"[bold]{prompt}[/bold]")
            self.console.print()
            return user_input.strip()
        except (KeyboardInterrupt, EOFError):
            self.console.print("\n[dim]Goodbye![/dim]")
            sys.exit(0)

    def clear_screen(self):
        """Clear terminal screen"""
        self.console.clear()
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
# Utility functions for streaming from Groq API
|
|
153
|
+
|
|
154
|
+
async def groq_stream_to_generator(stream) -> AsyncGenerator[str, None]:
    """
    Adapt a Groq streaming response into an async generator of text.

    Args:
        stream: Groq stream object from client.chat.completions.create(stream=True)

    Yields:
        Each non-empty text delta carried by a streamed chunk; chunks with
        no choices or an empty/None delta are skipped.
    """
    for event in stream:
        choices = event.choices
        if not choices:
            continue
        delta_text = choices[0].delta.content
        if delta_text:
            yield delta_text
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
async def simulate_streaming(text: str, chunk_size: int = 5) -> AsyncGenerator[str, None]:
    """
    Simulate streaming for testing purposes.

    Args:
        text: Full text to stream
        chunk_size: Characters per chunk

    Yields:
        Successive slices of *text*, each at most *chunk_size* characters,
        with a short sleep after every slice to mimic network latency.
    """
    position = 0
    length = len(text)
    while position < length:
        yield text[position:position + chunk_size]
        await asyncio.sleep(0.05)  # Simulate network delay
        position += chunk_size
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
# Example usage
|
|
187
|
+
async def example_usage():
    """Example of how to use the streaming UI.

    Scripted demo: prints a header, streams two canned responses, and ends
    with a rate-limit notice.  NOTE(review): get_user_input() blocks on
    stdin, so this demo requires an interactive terminal.
    """

    ui = StreamingChatUI(
        app_name="Nocturnal Archive",
        working_dir="/home/researcher/project"
    )

    # Show header on startup
    ui.show_header()

    # Simulate conversation
    ui.show_user_message("hello")

    # Simulate streaming response
    response_text = (
        "Good evening. I'm ready to assist with your analysis. "
        "What would you like to work on today?"
    )

    async def response_generator():
        async for chunk in simulate_streaming(response_text):
            yield chunk

    await ui.stream_agent_response(response_generator())

    # Get next user input
    user_input = ui.get_user_input()
    ui.show_user_message(user_input)

    # Show action indicator
    indicator = ui.show_action_indicator("reading file")
    await asyncio.sleep(2)  # Simulate work
    indicator.stop()

    # Stream another response with markdown
    markdown_response = """
I can see you have several data files here:

• **gdp_data_2020_2024.csv** (245 KB)
• **unemployment_rates.xlsx** (89 KB)

Which dataset would you like me to analyze first?
"""

    async def md_generator():
        async for chunk in simulate_streaming(markdown_response):
            yield chunk

    # NOTE(review): streamed verbatim — the markdown markup is printed raw,
    # not rendered via stream_markdown_response().
    await ui.stream_agent_response(md_generator())

    # Show rate limit message
    ui.show_rate_limit_message(
        limit_type="Archive API",
        remaining_capabilities=[
            "Local data analysis (unlimited)",
            "Web searches (unlimited)",
            "Financial data (5 queries remaining)",
            "Conversation and file reading"
        ]
    )
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
if __name__ == "__main__":
    # Run example — manual smoke test of the demo conversation (interactive).
    asyncio.run(example_usage())
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: cite-agent
|
|
3
|
-
Version: 1.0.
|
|
4
|
-
Summary: AI
|
|
3
|
+
Version: 1.0.5
|
|
4
|
+
Summary: Terminal AI assistant for academic research with citation verification
|
|
5
5
|
Home-page: https://github.com/Spectating101/cite-agent
|
|
6
6
|
Author: Cite-Agent Team
|
|
7
7
|
Author-email: contact@citeagent.dev
|
|
@@ -15,8 +15,9 @@ Classifier: Programming Language :: Python :: 3.12
|
|
|
15
15
|
Requires-Python: >=3.9
|
|
16
16
|
Description-Content-Type: text/markdown
|
|
17
17
|
License-File: LICENSE
|
|
18
|
-
Requires-Dist: requests>=2.31.0
|
|
19
18
|
Requires-Dist: aiohttp>=3.9.0
|
|
19
|
+
Requires-Dist: groq>=0.4.0
|
|
20
|
+
Requires-Dist: requests>=2.31.0
|
|
20
21
|
Requires-Dist: python-dotenv>=1.0.0
|
|
21
22
|
Requires-Dist: pydantic>=2.5.0
|
|
22
23
|
Requires-Dist: rich>=13.7.0
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
cite_agent/__init__.py,sha256=wAXV2v8nNOmIAd0rh8196ItBl9hHWBVOBl5Re4VB77I,1645
|
|
2
|
+
cite_agent/account_client.py,sha256=yLuzhIJoIZuXHXGbaVMzDxRATQwcy-wiaLnUrDuwUhI,5725
|
|
3
|
+
cite_agent/agent_backend_only.py,sha256=H4DH4hmKhT0T3rQLAb2xnnJVjxl3pOZaljL9r6JndFY,6314
|
|
4
|
+
cite_agent/ascii_plotting.py,sha256=lk8BaECs6fmjtp4iH12G09-frlRehAN7HLhHt2crers,8570
|
|
5
|
+
cite_agent/auth.py,sha256=CYBNv8r1_wfdhsx-YcWOiXCiKvPBymaMca6w7JV__FQ,9809
|
|
6
|
+
cite_agent/backend_only_client.py,sha256=WqLF8x7aXTro2Q3ehqKMsdCg53s6fNk9Hy86bGxqmmw,2561
|
|
7
|
+
cite_agent/cli.py,sha256=ddZNtv8NPWlzTsipFZ1dgbgUAK7BrpY9GJP9jGkqs_I,18704
|
|
8
|
+
cite_agent/cli_conversational.py,sha256=RAmgRNRyB8gQ8QLvWU-Tt23j2lmA34rQNT5F3_7SOq0,11141
|
|
9
|
+
cite_agent/cli_enhanced.py,sha256=EAaSw9qtiYRWUXF6_05T19GCXlz9cCSz6n41ASnXIPc,7407
|
|
10
|
+
cite_agent/dashboard.py,sha256=VGV5XQU1PnqvTsxfKMcue3j2ri_nvm9Be6O5aVays_w,10502
|
|
11
|
+
cite_agent/enhanced_ai_agent.py,sha256=uDAchU26IfGtmAhzhwsxZ_oOQTCB8za-wfPkKRdaibQ,121739
|
|
12
|
+
cite_agent/rate_limiter.py,sha256=-0fXx8Tl4zVB4O28n9ojU2weRo-FBF1cJo9Z5jC2LxQ,10908
|
|
13
|
+
cite_agent/setup_config.py,sha256=kNZNr5cZmCXr43rGWNenNJXZ1Kfz7PrdLXpAqxM7WgM,16404
|
|
14
|
+
cite_agent/streaming_ui.py,sha256=N6TWOo7GVQ_Ynfw73JCfrdGcLIU-PwbS3GbsHQHegmg,7810
|
|
15
|
+
cite_agent/telemetry.py,sha256=55kXdHvI24ZsEkbFtihcjIfJt2oiSXcEpLzTxQ3KCdQ,2916
|
|
16
|
+
cite_agent/ui.py,sha256=r1OAeY3NSeqhAjJYmEBH9CaennBuibFAz1Mur6YF80E,6134
|
|
17
|
+
cite_agent/updater.py,sha256=kL2GYL1AKoZ9JoTXxFT5_AkvYvObcCrO2sIVyBw9JgU,7057
|
|
18
|
+
cite_agent/web_search.py,sha256=j-BRhT8EBC6BEPgACQPeVwB1SVGKDz4XLM7sowacvSc,6587
|
|
19
|
+
cite_agent-1.0.5.dist-info/licenses/LICENSE,sha256=XJkyO4IymhSUniN1ENY6lLrL2729gn_rbRlFK6_Hi9M,1074
|
|
20
|
+
src/__init__.py,sha256=0eEpjRfjRjOTilP66y-AbGNslBsVYr_clE-bZUzsX7s,40
|
|
21
|
+
src/services/__init__.py,sha256=pTGLCH_84mz4nGtYMwQES5w-LzoSulUtx_uuNM6r-LA,4257
|
|
22
|
+
src/services/simple_enhanced_main.py,sha256=IJoOplCqcVUg3GvN_BRyAhpGrLm_WEPy2jmHcNCY6R0,9257
|
|
23
|
+
src/services/auth_service/__init__.py,sha256=VVFfBUr_GMJuxVH_553D2PZmZ9vhHeab9_qiJEf-g6Q,38
|
|
24
|
+
src/services/auth_service/auth_manager.py,sha256=MJdWFE36R_htoyBbjgGSTSx2Py61sTM3lhBjXBZ4Bog,873
|
|
25
|
+
src/services/graph/__init__.py,sha256=jheRQ-x652RZ68fKyUqUNGXmTAJsp5URVMhlOauFRO0,29
|
|
26
|
+
src/services/graph/knowledge_graph.py,sha256=ips2IpVpxDFkdPku4XKgZNRnoR2NjZqZk3xbIArJaaM,7348
|
|
27
|
+
src/services/llm_service/__init__.py,sha256=eNAsQpJtVXpJENb-gHtpKzWpncnHHAMB05EI48wrugQ,122
|
|
28
|
+
src/services/llm_service/llm_manager.py,sha256=6o5KN-3wJ0hT8PS9hPMpTGS6G9SlleSzYsXZQRjj_vI,21027
|
|
29
|
+
src/services/paper_service/__init__.py,sha256=0ONhTf_3H81l5y6EqHMRZd5dCXLAXDa-gbYwge84zKA,142
|
|
30
|
+
src/services/paper_service/openalex.py,sha256=pPhPcHMK2gQJCUVPB4ujE8xya0UqUvfcN95cy5ooP68,8801
|
|
31
|
+
src/services/performance_service/__init__.py,sha256=48bYfW4pzf-FG9644kTnNwGyD1tJJ7tVn3cD3r_ZAbk,65
|
|
32
|
+
src/services/performance_service/rust_performance.py,sha256=n-FzJ98XslmpUAkmmuaunYDTPz-9ZY-qL4oWAoBAaoA,15558
|
|
33
|
+
src/services/research_service/__init__.py,sha256=ZCBzSUdstHqwMmJ1x0kJK4PkRlv9OrSOEFeQFoVM-7M,813
|
|
34
|
+
src/services/research_service/chatbot.py,sha256=12pVAoe_fd2RXi6_cP-fxfRnWyJStsyn8znVu5cy9qo,91153
|
|
35
|
+
src/services/research_service/citation_manager.py,sha256=vzyVivBS0_9IiFE-wOH9hiLiC-fpHmiaZpR1084DenE,16586
|
|
36
|
+
src/services/research_service/context_manager.py,sha256=FGbeylLWKvgoA5fElyiqg5IhnMBIZ-t3w0oDHN4Zy1E,61332
|
|
37
|
+
src/services/research_service/conversation_manager.py,sha256=-rdzURzu-SiqozyeQLid5a5lS-KzIqGDozdE8BG-DTs,22854
|
|
38
|
+
src/services/research_service/critical_paper_detector.py,sha256=gc3oZHB8RqDhxFqJx21NoKLcHmmqHXRo0eXY-AL5KSc,21941
|
|
39
|
+
src/services/research_service/enhanced_research.py,sha256=5B8zZjJ2iSLEgnjfyDKow5x_MLRANLJdMbLmmPR5Lc0,4268
|
|
40
|
+
src/services/research_service/enhanced_synthesizer.py,sha256=puJg2C10KXryCMPkec-chC4rxbIJdFFswo7w4rbaXkc,16603
|
|
41
|
+
src/services/research_service/query_generator.py,sha256=LcFTGsewE6l2LRgUI2E6fXAcpy4vaYaUFFfZhI_WlYU,30707
|
|
42
|
+
src/services/research_service/synthesizer.py,sha256=lCcu37PWhWVNphHKaJJDIC-JQ5OINAN7OJ7iV9BWAvM,52557
|
|
43
|
+
src/services/search_service/__init__.py,sha256=UZFXdd7r6wietQ2kESXEyGffdfBbpghquecQde7auF4,137
|
|
44
|
+
src/services/search_service/indexer.py,sha256=u3-uwdAfmahWWsdebDF9i8XIyp7YtUMIHzlmBLBnPPM,7252
|
|
45
|
+
src/services/search_service/search_engine.py,sha256=S9HqQ_mk-8W4d4MUOgBbEGQGV29-eSuceSFvVb4Xk-k,12500
|
|
46
|
+
cite_agent-1.0.5.dist-info/METADATA,sha256=MuIZ15aNJBnahJSKtmW6kqNHIZVUVtCb_XLjz4Voc0w,6904
|
|
47
|
+
cite_agent-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
48
|
+
cite_agent-1.0.5.dist-info/entry_points.txt,sha256=bJ0u28nFIxQKH1PWQ2ak4PV-FAjhoxTC7YADEdDenFw,83
|
|
49
|
+
cite_agent-1.0.5.dist-info/top_level.txt,sha256=TgOFqJTIy8vDZuOoYA2QgagkqZtfhM5Acvt_IsWzAKo,15
|
|
50
|
+
cite_agent-1.0.5.dist-info/RECORD,,
|
src/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Project root package initializer."""
|
src/services/__init__.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main services package for AI services layer
|
|
3
|
+
Provides unified access to all service components
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
# Import all major service classes for easy access
|
|
7
|
+
from .llm_service.llm_manager import LLMManager
|
|
8
|
+
from .research_service.enhanced_research import EnhancedResearchService
|
|
9
|
+
from .context_manager.advanced_context import AdvancedContextManager
|
|
10
|
+
from .tool_framework.tool_manager import ToolManager
|
|
11
|
+
from .auth_service.auth_manager import auth_manager
|
|
12
|
+
|
|
13
|
+
# Service registry for dependency injection
SERVICE_REGISTRY = {}


def register_service(name: str, service_instance):
    """Record *service_instance* in the global registry under *name*."""
    SERVICE_REGISTRY[name] = service_instance


def get_service(name: str):
    """Return the service registered under *name*, or None if unknown."""
    try:
        return SERVICE_REGISTRY[name]
    except KeyError:
        return None
|
|
23
|
+
|
|
24
|
+
def initialize_services(config: dict = None):
    """Initialize all core services with configuration.

    Each service is constructed independently; a failure is reported and
    skipped so a partial environment (e.g. tests without redis or with
    missing optional modules) can still proceed.

    Args:
        config: optional settings dict; 'redis_url' is honored for the LLM
            manager, defaulting to a local redis instance.

    Returns:
        dict mapping service key -> instance for every service that
        initialized successfully.
    """
    settings = config or {}
    ready = {}

    redis_url = settings.get('redis_url', 'redis://localhost:6379')

    # (registry key, human-readable label, zero-arg factory)
    factories = (
        ('llm_manager', 'LLM Manager', lambda: LLMManager(redis_url=redis_url)),
        ('research_service', 'Research Service', lambda: EnhancedResearchService()),
        ('context_manager', 'Context Manager', lambda: AdvancedContextManager()),
        ('tool_manager', 'Tool Manager', lambda: ToolManager()),
    )

    for key, label, build in factories:
        try:
            instance = build()
        except Exception as e:
            # Graceful fallback for testing
            print(f"{label} initialization skipped: {e}")
            continue
        ready[key] = instance
        register_service(key, instance)

    return ready
|
|
66
|
+
|
|
67
|
+
class ServiceLayer:
    """Unified service layer for easy access to all AI services.

    Thin facade over initialize_services(): call initialize() once, then
    read individual services through the properties below.  Each property
    may return None when that service failed to initialize.
    """

    def __init__(self, config: dict = None):
        # config is passed through to initialize_services(); None becomes {}.
        self.config = config or {}
        self.services = {}          # key -> service instance, filled by initialize()
        self._initialized = False   # guards against re-initializing

    def initialize(self):
        """Initialize all services (idempotent; subsequent calls are no-ops)."""
        if self._initialized:
            return

        self.services = initialize_services(self.config)
        self._initialized = True

    @property
    def llm_manager(self) -> LLMManager:
        """Get LLM Manager service (None if it failed to initialize)."""
        return self.services.get('llm_manager')

    @property
    def research_service(self) -> EnhancedResearchService:
        """Get Research service (None if it failed to initialize)."""
        return self.services.get('research_service')

    @property
    def context_manager(self) -> AdvancedContextManager:
        """Get Context Manager service (None if it failed to initialize)."""
        return self.services.get('context_manager')

    @property
    def tool_manager(self) -> ToolManager:
        """Get Tool Manager service (None if it failed to initialize)."""
        return self.services.get('tool_manager')

    def get_health_status(self) -> dict:
        """Get health status of all services.

        Returns a dict with initialization flag, count, and the keys of the
        services that actually came up.
        """
        status = {
            "services_initialized": self._initialized,
            "total_services": len(self.services),
            "available_services": list(self.services.keys())
        }
        return status
|
|
111
|
+
|
|
112
|
+
# Global service layer instance (lazily created by get_service_layer)
_service_layer = None

def get_service_layer(config: dict = None) -> ServiceLayer:
    """Get the global service layer instance.

    NOTE(review): *config* is only honored on the FIRST call — later calls
    return the existing singleton and silently ignore their argument.
    """
    global _service_layer
    if _service_layer is None:
        _service_layer = ServiceLayer(config)
    return _service_layer
|
|
121
|
+
|
|
122
|
+
# Export key classes and functions
|
|
123
|
+
__all__ = [
|
|
124
|
+
'LLMManager',
|
|
125
|
+
'EnhancedResearchService',
|
|
126
|
+
'AdvancedContextManager',
|
|
127
|
+
'ToolManager',
|
|
128
|
+
'ServiceLayer',
|
|
129
|
+
'get_service_layer',
|
|
130
|
+
'initialize_services',
|
|
131
|
+
'auth_manager'
|
|
132
|
+
]
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Basic authentication manager for testing
|
|
3
|
+
"""
|
|
4
|
+
from typing import Dict, Any, Optional
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class AuthManager:
    """Stub authentication manager for test environments.

    Every operation resolves against one hard-coded user and one fixed
    token; no real credential checking happens here.
    """

    def __init__(self):
        # The single canned identity returned by all auth operations.
        self.test_user = {
            "id": "test_user_123",
            "username": "test_user",
            "email": "test@example.com"
        }

    async def get_current_user(self) -> Dict[str, Any]:
        """Return the canned test user."""
        return self.test_user

    def verify_token(self, token: str) -> Optional[Dict[str, Any]]:
        """Accept only the fixed test token; any other value yields None."""
        return self.test_user if token == "test_token" else None

    def create_token(self, user_data: Dict[str, Any]) -> str:
        """Issue the fixed test token regardless of *user_data*."""
        return "test_token"


# Global instance
auth_manager = AuthManager()
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Graph service package."""
|