aipa-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aipa_cli-0.1.0.dist-info/METADATA +69 -0
- aipa_cli-0.1.0.dist-info/RECORD +28 -0
- aipa_cli-0.1.0.dist-info/WHEEL +4 -0
- aipa_cli-0.1.0.dist-info/entry_points.txt +2 -0
- aipa_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- aipriceaction_terminal/__init__.py +3 -0
- aipriceaction_terminal/__main__.py +3 -0
- aipriceaction_terminal/actions.py +73 -0
- aipriceaction_terminal/agents/__init__.py +20 -0
- aipriceaction_terminal/agents/agent.py +175 -0
- aipriceaction_terminal/agents/callbacks.py +202 -0
- aipriceaction_terminal/agents/config.py +35 -0
- aipriceaction_terminal/agents/personas.py +175 -0
- aipriceaction_terminal/agents/tools.py +152 -0
- aipriceaction_terminal/app.py +97 -0
- aipriceaction_terminal/bindings.py +25 -0
- aipriceaction_terminal/chat.py +345 -0
- aipriceaction_terminal/cli.py +54 -0
- aipriceaction_terminal/cli_commands.py +51 -0
- aipriceaction_terminal/settings_tab.py +76 -0
- aipriceaction_terminal/theme.py +29 -0
- aipriceaction_terminal/ticker_data.py +33 -0
- aipriceaction_terminal/user_settings.py +27 -0
- aipriceaction_terminal/utils.py +29 -0
- aipriceaction_terminal/widgets/__init__.py +6 -0
- aipriceaction_terminal/widgets/chat_input.py +179 -0
- aipriceaction_terminal/widgets/ticker_select.py +168 -0
- aipriceaction_terminal/workflows.py +172 -0
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
"""Chat tab: message history + input + slash commands."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from textual import work
|
|
8
|
+
from textual.widgets import RichLog, Input, Static
|
|
9
|
+
from textual.containers import Vertical, VerticalScroll
|
|
10
|
+
from textual.screen import Screen
|
|
11
|
+
|
|
12
|
+
from .widgets import ChatInput
|
|
13
|
+
from .utils import write_context_result, write_error, write_export_result
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class ThinkingModal(Screen[None]):
    """Modal overlay showing full thinking text."""

    # Ctrl+O both opens (from ChatTab) and closes this modal.
    BINDINGS = [("ctrl+o", "close")]

    DEFAULT_CSS = """
    ThinkingModal {
        align: center middle;
    }

    #thinking-dialog {
        width: 90%;
        height: 80%;
        max-width: 160;
        border: thick $accent;
        background: $surface;
        padding: 0;
    }

    #thinking-title {
        height: 3;
        width: 100%;
        content-align: center middle;
        color: $text;
        text-style: bold;
        border-bottom: solid $accent;
        padding: 0 1;
    }

    #thinking-scroll {
        width: 100%;
        height: 1fr;
        padding: 1;
        color: $text-muted;
    }
    """

    def __init__(self, history: list[tuple[str, str]]) -> None:
        """Keep the (timestamp, text) entries to render in compose()."""
        super().__init__()
        self._history = history

    def compose(self):
        """Build the dialog: a title bar plus a scrollable list of entries."""
        count = len(self._history)
        with Vertical(id="thinking-dialog"):
            yield Static(
                f"Thinking ({count} {'entry' if count == 1 else 'entries'}) (Esc / Ctrl+O to close)",
                id="thinking-title",
            )
            with VerticalScroll(id="thinking-scroll"):
                for i, (ts, text) in enumerate(self._history):
                    if i > 0:
                        # Horizontal rule between consecutive entries.
                        yield Static("─" * 60, classes="thinking-separator")
                    yield Static(f"[bold]{ts}[/bold]", classes="thinking-ts")
                    yield Static(text, classes="thinking-text")

    def on_mount(self) -> None:
        # Make the scroll container focusable so keyboard scrolling works
        # as soon as the modal appears.
        scroll = self.query_one("#thinking-scroll", VerticalScroll)
        scroll.can_focus = True
        scroll.focus()

    def action_close(self) -> None:
        """Dismiss the modal (bound to Ctrl+O)."""
        self.app.pop_screen()
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class ChatTab(Vertical):
    """Chat interface for AI ticker analysis."""

    BINDINGS = [("ctrl+o", "show_thinking", "Thinking")]

    DEFAULT_CSS = """
    ChatTab {
        height: 100%;
    }
    #chat-log {
        height: 1fr;
        border: solid $accent;
        padding: 1;
        overflow-x: hidden;
    }
    #thinking-area {
        height: 3;
        border: solid $accent;
        padding: 0 1;
        overflow-y: hidden;
        color: $text-muted;
        text-style: italic;
    }
    #thinking-area.hidden {
        display: none;
    }
    #chat-input {
        height: 3;
    }
    """

    def compose(self):
        """Log on top, collapsible thinking strip, input at the bottom."""
        yield RichLog(id="chat-log", highlight=True, markup=True, wrap=True, min_width=1)
        yield Static(id="thinking-area", classes="hidden")
        yield ChatInput(id="chat-input")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Completed thinking passages as (timestamp, text); viewable via Ctrl+O.
        self._thinking_history: list[tuple[str, str]] = []

    def on_mount(self) -> None:
        """Print the welcome banner once the log widget exists."""
        log = self.query_one("#chat-log", RichLog)
        log.can_focus = False
        log.write(
            "[bold cyan]AIPriceAction Terminal[/bold cyan]\n"
            "Type [bold]/help[/bold] for available commands.\n"
        )

    def _show_thinking_area(self, text: str) -> None:
        """Show the thinking area with truncated text."""
        area = self.query_one("#thinking-area", Static)
        area.remove_class("hidden")
        # Keep only the tail so the 3-row strip shows the latest tokens.
        truncated = text[-200:] if len(text) > 200 else text
        area.update(truncated)

    def _hide_thinking_area(self) -> None:
        """Hide the thinking area and clear its content."""
        area = self.query_one("#thinking-area", Static)
        area.add_class("hidden")
        area.update("")

    def _store_thinking(self, text: str) -> None:
        """Store complete thinking text with timestamp for later viewing with Ctrl+O."""
        ts = datetime.now().strftime("%H:%M:%S")
        self._thinking_history.append((ts, text))

    def action_show_thinking(self) -> None:
        """Push a modal with full thinking history, or pop if already showing."""
        if isinstance(self.app.screen_stack[-1], ThinkingModal):
            self.app.pop_screen()
            return
        if self._thinking_history:
            self.app.push_screen(ThinkingModal(self._thinking_history))

    def on_input_submitted(self, event: Input.Submitted) -> None:
        """Route a submitted line: slash command dispatch or agent chat."""
        text = event.value.strip()
        if not text:
            return
        event.input.value = ""

        # Save to history via ChatInput
        chat_input = self.query_one("#chat-input", ChatInput)
        chat_input.push_history(text)

        log = self.query_one("#chat-log", RichLog)

        if text.startswith("/"):
            self._handle_slash_command(text)
        else:
            log.write(f"[bold cyan]You:[/bold cyan] {text}")
            self._run_agent_chat(text)

    def _handle_slash_command(self, text: str) -> None:
        """Parse and dispatch a /command entered in the chat input."""
        # maxsplit=2 keeps at most "/cmd arg rest"; /export re-splits below.
        parts = text.split(maxsplit=2)
        cmd = parts[0].lower()
        arg = parts[1] if len(parts) > 1 else None

        log = self.query_one("#chat-log", RichLog)

        if cmd == "/help":
            log.write(
                "[bold yellow]Available commands:[/bold yellow]\n"
                " /analyze <ticker> [interval] - Build AI context (e.g. /analyze VIC or /analyze STB 1h)\n"
                " /export <ticker> [tickers...] [--interval 1D] [--path ~/dir/]\n"
                " - Export AI context to markdown file\n"
                " /deep-research [q] - Multi-agent deep research (not yet implemented)\n"
                " /exit - Quit the application\n"
                " /help - Show this help message\n"
                " /clear - Clear chat history\n"
            )
        elif cmd == "/clear":
            # Clears both the visible log and the agent's conversation state.
            log.clear()
            self.app.agent.clear_history()
        elif cmd == "/exit":
            self.app.exit()
        elif cmd == "/analyze":
            if not arg:
                log.write("[bold red]Usage: /analyze <ticker> [interval][/bold red] (e.g. /analyze VIC or /analyze STB 1h)")
                return
            # Fall back to the app-wide interval when none is given.
            interval = parts[2] if len(parts) > 2 else self.app.interval
            ticker = arg
            log.write(f"[bold cyan]You:[/bold cyan] /analyze {ticker} {interval}")
            log.write("[dim]Building context...[/dim]")
            self._run_analyze(ticker, interval)
        elif cmd == "/deep-research":
            question = " ".join(parts[1:]) if len(parts) > 1 else ""
            log.write("[bold cyan]You:[/bold cyan] /deep-research" + (f" {question}" if question else ""))
            log.write(
                "[bold yellow]Deep research is not yet implemented.[/bold yellow]\n"
                "[dim]This will eventually run the multi-agent LangGraph pipeline "
                "(supervisor -> parallel workers -> aggregator -> reviewer).[/dim]\n"
            )
        elif cmd == "/export":
            # Hand-rolled flag parsing: bare words are tickers, --interval
            # and --path each consume the following token.
            args = text.split()[1:]  # skip /export
            tickers: list[str] = []
            interval: str | None = None
            out_path: str | None = None
            i = 0
            while i < len(args):
                if args[i] == "--interval" and i + 1 < len(args):
                    interval = args[i + 1]
                    i += 2
                elif args[i] == "--path" and i + 1 < len(args):
                    out_path = args[i + 1]
                    i += 2
                else:
                    tickers.append(args[i])
                    i += 1
            if not tickers:
                log.write(
                    "[bold red]Usage: /export <ticker> [tickers...] "
                    "[--interval 1D] [--path ~/dir/][/bold red]"
                )
                return
            interval = interval or self.app.interval
            export_dir = Path(out_path).expanduser() if out_path else Path("~/aipriceaction-exports").expanduser()
            ticker_label = "_".join(tickers)
            log.write(f"[bold cyan]You:[/bold cyan] /export {ticker_label} --interval {interval}")
            log.write("[dim]Building context and exporting...[/dim]")
            self._run_export(tickers, interval, export_dir)
        else:
            log.write(f"[bold red]Unknown command:[/bold red] {cmd}")

    @work(exclusive=True)
    async def _run_analyze(self, ticker: str, interval: str) -> None:
        """Build AI context for a ticker in a background worker."""
        try:
            builder = self.app.builder

            # builder.build is blocking; keep it off the UI event loop.
            context = await asyncio.to_thread(
                builder.build, ticker=ticker, interval=interval
            )

            log = self.query_one("#chat-log", RichLog)
            write_context_result(log, ticker, interval, context)
        except Exception as e:
            log = self.query_one("#chat-log", RichLog)
            write_error(log, e)

    @work(exclusive=True)
    async def _run_export(
        self, tickers: list[str], interval: str, export_dir: Path
    ) -> None:
        """Build AI context and export to markdown file."""
        try:
            builder = self.app.builder

            # Single vs. multi ticker use different builder kwargs.
            if len(tickers) == 1:
                context = await asyncio.to_thread(
                    builder.build, ticker=tickers[0], interval=interval
                )
            else:
                context = await asyncio.to_thread(
                    builder.build, tickers=tickers, interval=interval
                )

            export_dir.mkdir(parents=True, exist_ok=True)
            ticker_label = "_".join(tickers)
            date_str = datetime.now().strftime("%Y-%m-%d")
            filename = f"{ticker_label}_{interval}_{date_str}.md"
            filepath = export_dir / filename

            # File write is blocking I/O; run it in a thread too.
            await asyncio.to_thread(filepath.write_text, context, encoding="utf-8")

            log = self.query_one("#chat-log", RichLog)
            write_export_result(log, str(filepath), len(context))
        except Exception as e:
            log = self.query_one("#chat-log", RichLog)
            write_error(log, e)

    @work(exclusive=True)
    async def _run_agent_chat(self, message: str) -> None:
        """Stream an agent response into the chat log."""
        log = self.query_one("#chat-log", RichLog)
        try:
            from .agents.callbacks import StreamEventType
            buffer: list[str] = []          # pending answer tokens, flushed per line
            thinking_buf: list[str] = []    # pending thinking tokens for the strip

            def flush() -> None:
                """Write buffered tokens as a single line to the RichLog."""
                if buffer:
                    log.write("".join(buffer))
                    buffer.clear()

            def collapse_thinking() -> None:
                """Collapse thinking area: write summary to log, store text."""
                if thinking_buf:
                    text = "".join(thinking_buf)
                    thinking_buf.clear()
                    # Skip trivial fragments (e.g. trailing "." from the model)
                    if len(text.strip()) <= 1:
                        self._hide_thinking_area()
                        return
                    self._store_thinking(text)
                    self._hide_thinking_area()
                    log.write(f"[dim]Thought for {len(text)} chars (Ctrl+O to view)[/dim]")

            async for event in self.app.agent.stream(message):
                if event.type == StreamEventType.THINKING:
                    # Accumulate and mirror the tail into the live strip.
                    thinking_buf.append(event.content)
                    self._show_thinking_area("".join(thinking_buf))

                elif event.type == StreamEventType.TOKEN:
                    # First answer token ends the thinking phase.
                    if thinking_buf:
                        collapse_thinking()
                    buffer.append(event.content)
                    if "\n" in event.content:
                        flush()

                elif event.type == StreamEventType.DONE:
                    if thinking_buf:
                        collapse_thinking()
                    flush()
                    log.write("")

                else:
                    # Tool/error events interrupt token flow: flush first so
                    # ordering in the log matches the stream.
                    flush()
                    if event.type == StreamEventType.TOOL_CALL_START:
                        log.write(f"[dim italic]{event.content}[/dim italic]")
                    elif event.type == StreamEventType.TOOL_RESULT:
                        log.write(f"[dim]{event.content}[/dim]")
                    elif event.type == StreamEventType.ERROR:
                        log.write(f"[bold red]{event.content}[/bold red]")
        except Exception as e:
            log.write(f"[bold red]Agent error: {e}[/bold red]\n")
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"""CLI dispatcher: routes to TUI or subcommands."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def run():
    """Parse CLI arguments and dispatch to a subcommand or the TUI.

    With no subcommand the Textual app is launched. Subcommand modules are
    imported lazily so plain TUI startup never pays for them.
    """
    root = argparse.ArgumentParser(prog="aipa", description="AIPriceAction terminal")
    commands = root.add_subparsers(dest="command")

    # aipa analyze VCB [tickers...] plus the full set of context options.
    analyze = commands.add_parser("analyze", help="Build AI context for a ticker")
    analyze.add_argument("tickers", nargs="+", help="Ticker symbol(s)")
    analyze.add_argument("--interval", default="1D")
    analyze.add_argument("--limit", type=int, default=None)
    for flag in ("--source", "--start-date", "--end-date"):
        analyze.add_argument(flag, default=None)
    analyze.add_argument("--reference-ticker", default="VNINDEX")
    analyze.add_argument("--lang", default="en", choices=["en", "vn"])
    analyze.add_argument("--ma-type", default="ema", choices=["ema", "sma"])

    # aipa get-ohlcv-data TICKER with range/source/moving-average options.
    ohlcv = commands.add_parser("get-ohlcv-data", help="Fetch raw OHLCV data")
    ohlcv.add_argument("ticker", help="Ticker symbol")
    ohlcv.add_argument("--interval", default="1D")
    ohlcv.add_argument("--limit", type=int, default=None)
    for flag in ("--start-date", "--end-date", "--source"):
        ohlcv.add_argument(flag, default=None)
    # --ma is on by default; --no-ma writes False into the same destination.
    ohlcv.add_argument("--ma", action="store_true", default=True)
    ohlcv.add_argument("--no-ma", dest="ma", action="store_false")
    ohlcv.add_argument("--ema", action="store_true", default=False)

    # aipa deep-research [question]
    deep = commands.add_parser("deep-research", help="Multi-agent deep research (not yet implemented)")
    deep.add_argument("question", nargs="*", help="Research question")

    ns = root.parse_args()

    if ns.command == "analyze":
        from .cli_commands import cmd_analyze
        cmd_analyze(ns)
    elif ns.command == "get-ohlcv-data":
        from .cli_commands import cmd_get_ohlcv
        cmd_get_ohlcv(ns)
    elif ns.command == "deep-research":
        from .cli_commands import cmd_deep_research
        cmd_deep_research(" ".join(ns.question) if ns.question else "")
    else:
        # No subcommand: start the interactive terminal UI.
        from .app import main
        main()
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""CLI subcommand implementations — thin wrappers around SDK methods."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def cmd_analyze(args) -> None:
    """Build and print an AI context for one or more tickers.

    Args:
        args: argparse namespace carrying ``tickers``, ``interval``,
            ``limit``, ``source``, ``start_date``, ``end_date``,
            ``reference_ticker``, ``lang`` and ``ma_type``.
    """
    from aipriceaction import AIContextBuilder

    builder = AIContextBuilder(lang=args.lang, ma_type=args.ma_type)
    # All options except the ticker selection are shared between the
    # single- and multi-ticker forms of builder.build, so build them once
    # instead of duplicating the full call twice.
    common = dict(
        interval=args.interval,
        limit=args.limit,
        source=args.source,
        start_date=args.start_date,
        end_date=args.end_date,
        reference_ticker=args.reference_ticker,
        include_system_prompt=False,
    )
    if len(args.tickers) == 1:
        # Single ticker uses the `ticker` kwarg, multiple use `tickers`.
        context = builder.build(ticker=args.tickers[0], **common)
    else:
        context = builder.build(tickers=args.tickers, **common)
    print(context)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def cmd_get_ohlcv(args) -> None:
    """Fetch OHLCV rows for a ticker and print them as a plain text table."""
    from aipriceaction import AIPriceAction

    frame = AIPriceAction().get_ohlcv(
        ticker=args.ticker,
        interval=args.interval,
        limit=args.limit,
        start_date=args.start_date,
        end_date=args.end_date,
        source=args.source,
        ma=args.ma,
        ema=args.ema,
    )
    # Render without the index column for cleaner terminal output.
    print(frame.to_string(index=False))
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def cmd_deep_research(question: str) -> None:
    """Placeholder for the future multi-agent deep-research pipeline.

    The *question* argument is accepted to keep the call signature stable
    but is currently ignored.
    """
    print("deep-research is not yet implemented.")
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"""Settings tab: placeholder for configuration."""
|
|
2
|
+
|
|
3
|
+
from textual.containers import Vertical, Horizontal
|
|
4
|
+
from textual.widgets import Static, Input, Button, Select
|
|
5
|
+
|
|
6
|
+
from .user_settings import save_settings
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class SettingsTab(Vertical):
    """Settings configuration."""

    DEFAULT_CSS = """
    SettingsTab {
        padding: 1 2;
    }
    .setting-row {
        height: auto;
        margin-bottom: 1;
    }
    #apply-btn {
        margin-top: 1;
    }
    .setting-label {
        width: 12;
    }
    .setting-input {
        width: 20;
    }
    #setting-language {
        width: 20;
    }
    """

    def compose(self):
        """Lay out the form: reference ticker, interval, language, Apply."""
        yield Static("[bold]Settings[/bold] (placeholder)")
        yield Static("")
        with Horizontal(classes="setting-row"):
            yield Static("Ref Ticker:", classes="setting-label")
            yield Input(value="VNINDEX", id="setting-ticker", classes="setting-input")
        with Horizontal(classes="setting-row"):
            yield Static("Interval:", classes="setting-label")
            yield Select(
                [("1m", "1m"), ("1h", "1h"), ("1D", "1D")],
                value="1D",
                allow_blank=False,
                id="setting-interval",
            )
        with Horizontal(classes="setting-row"):
            yield Static("Language:", classes="setting-label")
            yield Select(
                [("English", "en"), ("Tiếng Việt", "vn")],
                value="en",
                allow_blank=False,
                id="setting-language",
            )
        yield Button("Apply", id="apply-btn", variant="primary")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Apply the form values to the running app and persist them."""
        if event.button.id == "apply-btn":
            ticker = self.query_one("#setting-ticker", Input).value.strip().upper()
            interval = self.query_one("#setting-interval", Select).value
            language = self.query_one("#setting-language", Select).value

            if ticker:
                self.app.ticker = ticker
            if interval:
                self.app.interval = interval
            if language:
                self.app.language = language
                # A language change invalidates both the context builder and
                # the agent session, so rebuild them here.
                from aipriceaction import AIContextBuilder
                self.app.builder = AIContextBuilder(lang=language)
                from .agents import AgentSession, AgentConfig
                self.app.agent = AgentSession(AgentConfig(lang=language))

            self.app.notify(f"Settings applied: {ticker} / {interval} / {language}")
            # NOTE(review): a blank ticker is skipped for app state above but
            # is still persisted here as "" — confirm load_settings callers
            # tolerate an empty saved ticker.
            save_settings({"ticker": ticker, "interval": interval, "language": language})
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Custom Dracula-inspired green theme and screen CSS."""
|
|
2
|
+
|
|
3
|
+
from textual.theme import Theme
|
|
4
|
+
|
|
5
|
+
# Dracula-derived palette; green (#50fa7b) is reused for primary, accent
# and success so the whole UI reads as a single-hue theme.
AI_GREEN = Theme(
    name="ai-green",
    primary="#50fa7b",
    secondary="#8be9fd",
    accent="#50fa7b",
    warning="#f1fa8c",
    error="#ff5555",
    success="#50fa7b",
    background="#1a1e2e",
    surface="#282a36",
    panel="#44475a",
    dark=True,
)

# App-level CSS: a vertical screen where the tabbed content (and each tab
# pane) stretches to fill the remaining height.
SCREEN_CSS = """
Screen {
    layout: vertical;
}
TabbedContent {
    height: 1fr;
}
TabbedContent TabPane {
    height: 1fr;
}
"""
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Ticker data tab: placeholder for raw OHLCV data view."""
|
|
2
|
+
|
|
3
|
+
from textual.containers import Vertical
|
|
4
|
+
from textual.widgets import Static
|
|
5
|
+
|
|
6
|
+
# Human-readable tab titles keyed by data-source mode; unknown modes fall
# back to the raw mode string at the lookup site.
_MODE_LABELS = {
    "vn": "VN Tickers",
    "crypto": "Crypto",
    "global": "Global",
}
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TickerDataTab(Vertical):
    """Display raw OHLCV data for a ticker source."""

    DEFAULT_CSS = """
    TickerDataTab {
        padding: 1 2;
    }
    """

    def __init__(self, mode: str = "vn", **kwargs):
        """Remember which data-source mode ("vn", "crypto", "global") this tab shows."""
        super().__init__(**kwargs)
        self.mode = mode

    def compose(self):
        """Yield a mode title plus a not-yet-implemented placeholder hint."""
        # Unknown modes fall back to the raw mode string as the title.
        title = _MODE_LABELS.get(self.mode, self.mode)
        yield Static(f"[bold]{title}[/bold]", id="td-title")
        yield Static(
            "[dim italic]This tab will display ticker data in a table format.\n"
            "Not yet implemented.[/dim italic]",
            id="td-hint",
        )
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Persistent user settings stored in ~/.aipriceaction/settings.json."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
# All persistent state lives under ~/.aipriceaction/.
_CONFIG_DIR = Path.home() / ".aipriceaction"
_SETTINGS_FILE = _CONFIG_DIR / "settings.json"

# Fallback values merged in for any key missing from the settings file.
_DEFAULTS = {
    "ticker": "VNINDEX",
    "interval": "1D",
    "language": "en",
}
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def load_settings() -> dict:
    """Load settings from disk, falling back to defaults for missing keys.

    A missing, unreadable, or corrupt settings file is treated as empty so
    the application always starts with a usable settings dict instead of
    crashing on a hand-edited or truncated settings.json.
    """
    try:
        # EAFP: read directly instead of exists()+read, which is racy and
        # still raised on invalid JSON.
        data = json.loads(_SETTINGS_FILE.read_text())
    except (OSError, json.JSONDecodeError):
        return dict(_DEFAULTS)
    if not isinstance(data, dict):
        # e.g. the file contains a bare list or string.
        return dict(_DEFAULTS)
    # User values override defaults key-by-key.
    return {**_DEFAULTS, **data}
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def save_settings(data: dict) -> None:
    """Persist settings to disk, creating the config directory if needed."""
    _CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(data, indent=2)
    _SETTINGS_FILE.write_text(payload)
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Shared display helpers for RichLog output formatting."""
|
|
2
|
+
|
|
3
|
+
from textual.widgets import RichLog
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def write_context_result(
    log: RichLog, ticker: str, interval: str, context: str
) -> None:
    """Write a formatted context build result to a RichLog."""
    line_count = len(context.split("\n"))
    header = (
        f"[bold green]Context built[/bold green] for [bold]{ticker}[/bold] "
        f"({interval}) - {len(context):,} chars, {line_count} lines\n"
    )
    log.write(header)
    log.write(f"[dim]{context}[/dim]")
    log.write("")
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def write_error(log: RichLog, error: Exception) -> None:
    """Render an exception as a bold red error line in a RichLog."""
    message = f"[bold red]Error:[/bold red] {error}"
    log.write(message)
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def write_export_result(log: RichLog, filepath: str, context_len: int) -> None:
    """Write a formatted export success message to a RichLog."""
    message = (
        f"[bold green]Exported[/bold green] {context_len:,} chars to "
        f"[bold]{filepath}[/bold]\n"
    )
    log.write(message)
|