aipa_cli-0.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries; it is provided for informational purposes only.
@@ -0,0 +1,69 @@
+ Metadata-Version: 2.4
+ Name: aipa-cli
+ Version: 0.1.0
+ Summary: Terminal TUI for AI-powered ticker analysis
+ Project-URL: Homepage, https://github.com/quanhua92/aipriceaction
+ Project-URL: Repository, https://github.com/quanhua92/aipriceaction
+ Author-email: Quan Hua <quanhua92@gmail.com>
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Environment :: Console :: Curses
+ Classifier: Intended Audience :: Financial and Insurance Industry
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Office/Business :: Financial
+ Requires-Python: >=3.13
+ Requires-Dist: aipriceaction
+ Requires-Dist: langchain-core
+ Requires-Dist: langchain-openai
+ Requires-Dist: langgraph
+ Requires-Dist: textual-autocomplete>=4.0.6
+ Requires-Dist: textual>=3.0.0
+ Description-Content-Type: text/markdown
+
+ # AIPA Terminal
+
+ **Live site:** [aipriceaction.com](https://aipriceaction.com) | **GitHub:** [aipriceaction](https://github.com/quanhua92/aipriceaction) | **Frontend:** [aipriceaction-web](https://github.com/quanhua92/aipriceaction-web) | **Docker image:** [`quanhua92/aipriceaction:latest`](https://hub.docker.com/r/quanhua92/aipriceaction) | **Python SDK:** [`aipriceaction` on PyPI](https://pypi.org/project/aipriceaction/) | **AIPA Terminal:** [`aipa-cli` on PyPI](https://pypi.org/project/aipa-cli/)
+
+ A Textual-based terminal interface for AI-powered ticker analysis, featuring streaming chat with thinking/reasoning display, autocomplete, slash commands, and workflow tabs.
+
+ ## Install
+
+ ```bash
+ # Run directly (no install)
+ uvx aipa-cli
+
+ # Or install as a standalone tool
+ uv tool install aipa-cli
+ ```
+
+ ## Requirements
+
+ - Python 3.13+
+ - An OpenAI-compatible API key (`OPENAI_API_KEY`)
+ - Optional: set `OPENAI_BASE_URL` for custom providers like OpenRouter (example below)
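+
+ For example, to point the TUI at OpenRouter (the URL below is OpenRouter's OpenAI-compatible endpoint; substitute your own key):
+
+ ```bash
+ export OPENAI_API_KEY="sk-or-..."
+ export OPENAI_BASE_URL="https://openrouter.ai/api/v1"
+ ```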
+
+ ## Usage
+
+ ```bash
+ aipa                  # Launch the TUI
+ aipa analyze          # Run ticker analysis from CLI
+ aipa get-ohlcv-data   # Fetch OHLCV data from CLI
+ aipa deep-research    # Run deep research from CLI
+ ```
+
+ ### TUI
+
+ The interface has three tabs:
+
+ - **Chat** — AI-powered chat with streaming responses, thinking/reasoning display, slash commands (`/analyze`, `/export`, `/clear`, `/exit`), and arrow-key history navigation
+ - **Workflows** — Structured analysis forms for ticker analysis and deep research
+ - **Tickers** — Browse and search available tickers
+
+ Press `Ctrl+O` in the Chat tab to view thinking/reasoning history.
+
+ ## License
+
+ MIT
@@ -0,0 +1,28 @@
+ aipriceaction_terminal/__init__.py,sha256=gPrefNl9a5TjwubXBnZDuydjl9V9rfRGjCWXW_APhho,94
+ aipriceaction_terminal/__main__.py,sha256=yQP0sf-rkTO-3MOotv4blvukvNGwbJFz10hQhMsxcJM,28
+ aipriceaction_terminal/actions.py,sha256=EP6GDpUplKBbiheB_qPP3UL8IgSCypnw6_bIlyHexfE,2424
+ aipriceaction_terminal/app.py,sha256=49YIi3OrY7tHKJ1yo-_ZPm6T0r6Ex17VJ7EFENJgcgo,3624
+ aipriceaction_terminal/bindings.py,sha256=R7clTVPBbFydXIxqg5ij_D1N9BWmONZ_lEM8Rd_pYPk,1053
+ aipriceaction_terminal/chat.py,sha256=EuWWe8uTTp7m-w6hEYFHnmD3zvAz4sgvzYxp8YOhtdI,12708
+ aipriceaction_terminal/cli.py,sha256=r53uCIKjLw1wqFGQZ93Z3_z-zA5EXwHHlsikevbx4bU,2484
+ aipriceaction_terminal/cli_commands.py,sha256=T6qFNH5rH1Jv1Uo40F1jr0IvUWNDkB3UT5kwZJNqBL0,1477
+ aipriceaction_terminal/settings_tab.py,sha256=6WCbc9WEdJXliy0M0mRthtTIN-fEVOxhkpqiQi-RTkg,2590
+ aipriceaction_terminal/theme.py,sha256=RKNunhKP30ign3_d6XB_XPGXHCpXAByOMhb4TAKH_eM,488
+ aipriceaction_terminal/ticker_data.py,sha256=MqnFM61QCHQdD5xc3P4-ym66WAhLzeTBAzhlWrqtsKc,835
+ aipriceaction_terminal/user_settings.py,sha256=Nk7V3PhvrIBM2RruWv85pR-ankwaziaVNbjt98j-pxw,776
+ aipriceaction_terminal/utils.py,sha256=hNfwyCVspY4UqLyBfshZ1eoFyBmpO4SCBORgLM9D0HE,958
+ aipriceaction_terminal/workflows.py,sha256=g2mq7zuHUFdUHe2r4lRifj6Q21tnUSd1lvvN8vqpFeg,5278
+ aipriceaction_terminal/agents/__init__.py,sha256=8XTDmU3oKVaq4S7VCXPgZhKV6R8tQYe6NOkK3vDwDic,555
+ aipriceaction_terminal/agents/agent.py,sha256=F0b2MmayxeduI_DksQ1K0r-5u3DNT2qif4gcqqIfVlc,6466
+ aipriceaction_terminal/agents/callbacks.py,sha256=rJDepq55WA59PXfXkIK-H-mrnAlNqSQc7qAdr8dSwDo,7995
+ aipriceaction_terminal/agents/config.py,sha256=kM_agIqBnZ7tjcw1UkcFudDOmW6nvh4qoHpiX3MDbtQ,891
+ aipriceaction_terminal/agents/personas.py,sha256=aE-KCKUVeN8Qh0dNhsnEYtiW8h-aawYjKOQGRU8l1hg,7200
+ aipriceaction_terminal/agents/tools.py,sha256=BME0M7bBT5ZioQE3PXcIZNGSDqjwGni5OWySkfK-bEo,4750
+ aipriceaction_terminal/widgets/__init__.py,sha256=l6VhmuYHLeOpihfnsRrmJApaH8dNy0sO0mlmM68J_-w,167
+ aipriceaction_terminal/widgets/chat_input.py,sha256=ljl9ZPH-sJXXcfPJRWT6lKbz4XKa877KEHUqQhV7wh0,6321
+ aipriceaction_terminal/widgets/ticker_select.py,sha256=wqnWwNuG4aQt7KfFjQ3OKyYDnusm2j2AxAULHhAUi9w,6241
+ aipa_cli-0.1.0.dist-info/METADATA,sha256=flYRarF2W4TncEobYbgvfnPmcnfdKa5Fi6KNxUWgvk8,2539
+ aipa_cli-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
+ aipa_cli-0.1.0.dist-info/entry_points.txt,sha256=_n1i2eFfJd8t9MP6fMTkgbBrTouLGjfdKR-CU7MqNIo,57
+ aipa_cli-0.1.0.dist-info/licenses/LICENSE,sha256=HD6oXFCMoqYOrIATCk8Eh2vZx5IAFhe-NAxxtwIpLBs,1065
+ aipa_cli-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.29.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
@@ -0,0 +1,2 @@
+ [console_scripts]
+ aipa = aipriceaction_terminal.app:main
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Quan Hua
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,3 @@
+ """AIPriceAction Terminal - TUI chat interface for ticker analysis."""
+
+ __version__ = "0.1.0"
@@ -0,0 +1,3 @@
+ from .cli import run
+
+ run()
@@ -0,0 +1,73 @@
+ """Action handlers for AIPriceActionApp."""
+
+ import time
+
+ from textual.widgets import TabbedContent, Input, Select, Button, TextArea
+
+
+ class AppActions:
+     """Mixin providing all action_* methods for AIPriceActionApp."""
+
+     _quit_requested_at: float = 0.0
+
+     def action_switch_tab(self, tab_id: str) -> None:
+         tabs = self.query_one(TabbedContent)
+         tabs.active = tab_id
+
+     def action_focus_none(self) -> None:
+         """Blur any focused widget, or dismiss a modal if one is showing."""
+         from .chat import ThinkingModal
+         if isinstance(self.screen_stack[-1], ThinkingModal):
+             self.pop_screen()
+             return
+         self.set_focus(None)
+
+     def action_focus_first_input(self) -> None:
+         """Focus the first Input or Select in the active tab (respects nested tabs)."""
+         # Let Enter pass through to widgets that handle it themselves
+         if isinstance(self.focused, (Input, Select, Button, TextArea)):
+             return
+
+         tabs = self.query_one(TabbedContent)
+         active_pane = tabs.query(f"TabPane#{tabs.active}").first()
+         if active_pane is None:
+             return
+
+         # Find the innermost active TabbedContent within this pane
+         container = active_pane
+         try:
+             nested = active_pane.query_one("TabbedContent")
+             inner_pane = nested.query(f"TabPane#{nested.active}").first()
+             if inner_pane:
+                 container = inner_pane
+         except Exception:
+             pass
+
+         try:
+             first_input = container.query(Input).first()
+             first_input.focus()
+             return
+         except Exception:
+             pass
+         try:
+             first_select = container.query(Select).first()
+             first_select.focus()
+         except Exception:
+             pass
+
+     def action_confirm_quit(self) -> None:
+         """Quit on second press within 2 seconds, otherwise show warning."""
+         now = time.monotonic()
+         if now - self._quit_requested_at < 2.0:
+             self.exit()
+         else:
+             self._quit_requested_at = now
+             self.notify("Press ctrl+q again to quit", severity="warning")
+
+     def action_show_help(self) -> None:
+         self.app.notify(
+             "1-6: Switch tabs | ctrl+q: Quit | "
+             "esc: Back | enter: Focus input | "
+             "Chat: /help for commands",
+             title="Keyboard Shortcuts",
+         )
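
How the mixin is consumed is not shown here (`aipriceaction_terminal/app.py` appears only in the RECORD above). A minimal composition sketch, with a hypothetical app class name and binding, assuming Textual's usual name-based action dispatch:

```python
from textual.app import App

from aipriceaction_terminal.actions import AppActions


class SketchApp(AppActions, App):
    """Hypothetical app: Textual resolves the action name "confirm_quit"
    to AppActions.action_confirm_quit when the binding fires."""

    BINDINGS = [("ctrl+q", "confirm_quit", "Quit")]  # assumed binding
```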
@@ -0,0 +1,20 @@
+ """Agents module for AI-powered chat in the terminal TUI."""
+
+ from .agent import AgentSession
+ from .callbacks import StreamCallbackHandler, StreamEvent, StreamEventType
+ from .config import AgentConfig
+ from .personas import Persona, PersonaRegistry, get_default_persona
+ from .tools import ToolRegistry, get_default_tools
+
+ __all__ = [
+     "AgentSession",
+     "AgentConfig",
+     "Persona",
+     "PersonaRegistry",
+     "get_default_persona",
+     "ToolRegistry",
+     "get_default_tools",
+     "StreamCallbackHandler",
+     "StreamEvent",
+     "StreamEventType",
+ ]
@@ -0,0 +1,175 @@
+ """Agent session: wraps LangChain create_agent with streaming and retry."""
+
+ from __future__ import annotations
+
+ import asyncio
+ from collections.abc import AsyncIterator
+ from typing import TYPE_CHECKING, Any
+
+ from langchain.agents import create_agent
+ from langchain_core.messages import AIMessageChunk
+ from langchain_openai import ChatOpenAI
+ from langgraph.checkpoint.memory import MemorySaver
+
+ from .callbacks import StreamCallbackHandler, StreamEvent, StreamEventType
+ from .config import AgentConfig, TRANSIENT_ERROR_KEYWORDS
+
+ if TYPE_CHECKING:
+     from .personas import Persona
+     from .tools import ToolRegistry
+
+
+ class OpenRouterChatOpenAI(ChatOpenAI):
+     """ChatOpenAI subclass that preserves reasoning tokens from OpenRouter.
+
+     OpenRouter reasoning models (e.g. nvidia/nemotron-3-nano-omni-reasoning) return
+     a ``reasoning`` string field in the streaming delta. LangChain's default
+     ``_convert_delta_to_message_chunk`` ignores this field, so we override
+     ``_convert_chunk_to_generation_chunk`` to inject it into
+     ``AIMessageChunk.additional_kwargs["reasoning_content"]`` after the chunk is built.
+     """
+
+     def _convert_chunk_to_generation_chunk(
+         self,
+         chunk: dict[str, Any],
+         default_chunk_class: type,
+         base_generation_info: dict[str, Any] | None,
+     ) -> Any:
+         """Build the generation chunk, then inject reasoning into additional_kwargs."""
+         result = super()._convert_chunk_to_generation_chunk(
+             chunk, default_chunk_class, base_generation_info
+         )
+         if result is None:
+             return result
+
+         # Extract reasoning from the raw delta before LangChain discards it.
+         choices = chunk.get("choices", [])
+         if choices:
+             delta = choices[0].get("delta")
+             if isinstance(delta, dict):
+                 reasoning = delta.get("reasoning")
+                 if reasoning and isinstance(result.message, AIMessageChunk):
+                     result.message.additional_kwargs["reasoning_content"] = reasoning
+
+         return result
+
+
+ class AgentSession:
+     """Manages a single agent session with memory, streaming, and retry."""
+
+     def __init__(
+         self,
+         config: AgentConfig,
+         persona: Persona | None = None,
+         tools: ToolRegistry | None = None,
+     ) -> None:
+         from .personas import get_default_persona
+         from .tools import get_default_tools
+
+         self.config = config
+         self.persona = persona or get_default_persona(config.lang)
+         self.tools = tools or get_default_tools(config.lang)
+         self._checkpointer = MemorySaver()
+         self._thread_id = "terminal-default"
+         self._agent = self._build_agent()
+
+     def _build_agent(self) -> object:
+         """Build the LangChain agent from current config/persona/tools."""
+         llm = OpenRouterChatOpenAI(
+             api_key=self.config.api_key,
+             base_url=self.config.base_url,
+             model=self.config.model,
+             extra_body={"reasoning": {"enabled": True}},
+         )
+         system_prompt = self.persona.build_system_prompt(self.config.lang)
+         lc_tools = self.tools.get_tools()
+
+         return create_agent(
+             llm,
+             lc_tools,
+             checkpointer=self._checkpointer,
+             system_prompt=system_prompt,
+         )
+
+     async def stream(
+         self,
+         message: str,
+         *,
+         callback: object | None = None,
+     ) -> AsyncIterator[StreamEvent]:
+         """Stream an agent response as StreamEvents.
+
+         Built-in retry with exponential backoff on transient errors.
+         """
+         handler = StreamCallbackHandler(
+             show_tool_calls=True,
+             show_tool_results=False,
+         )
+
+         last_error: Exception | None = None
+         for attempt in range(self.config.max_retries):
+             try:
+                 input_dict = {"messages": [{"role": "user", "content": message}]}
+                 config = {"configurable": {"thread_id": self._thread_id}}
+
+                 async for lc_event in self._agent.astream(
+                     input_dict,
+                     config=config,
+                     stream_mode="messages",
+                 ):
+                     for stream_event in handler.process_agent_event(lc_event):
+                         if callback:
+                             await callback(stream_event)
+                         yield stream_event
+
+                 yield StreamEvent(type=StreamEventType.DONE)
+                 return
+
+             except Exception as e:
+                 last_error = e
+                 err_str = str(e).lower()
+                 is_transient = any(kw in err_str for kw in TRANSIENT_ERROR_KEYWORDS)
+
+                 if is_transient and attempt < self.config.max_retries - 1:
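+                     # Backoff doubles per attempt (5s, 10s, ... with the defaults),
+                     # capped at max_retry_delay by the min() below.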
+                     delay = self.config.base_retry_delay * (2 ** attempt)
+                     delay = min(delay, self.config.max_retry_delay)
+                     yield StreamEvent(
+                         type=StreamEventType.ERROR,
+                         content=f"Retry {attempt + 1}/{self.config.max_retries}: {type(e).__name__}",
+                     )
+                     await asyncio.sleep(delay)
+                 else:
+                     break
+
+         # All retries exhausted
+         yield StreamEvent(
+             type=StreamEventType.ERROR,
+             content=f"Error: {last_error}",
+         )
+         yield StreamEvent(type=StreamEventType.DONE)
+
+     async def run(self, message: str) -> str:
+         """Convenience wrapper: collect tokens and return the final answer."""
+         parts: list[str] = []
+         async for event in self.stream(message):
+             if event.type == StreamEventType.TOKEN:
+                 parts.append(event.content)
+             elif event.type == StreamEventType.ERROR:
+                 # A retry restarts the answer; drop tokens from the failed attempt
+                 parts.clear()
+
+         # Tokens are streamed deltas, so the final answer is their concatenation
+         return "".join(parts)
+
+     def switch_persona(self, persona: Persona) -> None:
+         """Switch to a different persona, clearing conversation history."""
+         self.persona = persona
+         self._checkpointer = MemorySaver()
+         self._agent = self._build_agent()
+
+     def clear_history(self) -> None:
+         """Clear conversation history (start fresh session)."""
+         self._checkpointer = MemorySaver()
+         self._agent = self._build_agent()
+
+     def rebuild(self) -> None:
+         """Rebuild agent after config change (language/model)."""
+         self._checkpointer = MemorySaver()
+         self._agent = self._build_agent()
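
`AgentSession` can also be driven outside the TUI. A minimal sketch, assuming the SDK's `.env` provides the OpenAI-compatible credentials; the prompt is illustrative:

```python
import asyncio

from aipriceaction_terminal.agents import AgentConfig, AgentSession, StreamEventType


async def main() -> None:
    # Defaults (API key, base URL, model, language) come from the SDK settings
    session = AgentSession(AgentConfig())
    async for event in session.stream("Give a quick read on ticker VNM"):
        if event.type == StreamEventType.TOKEN:
            print(event.content, end="", flush=True)


asyncio.run(main())
```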
@@ -0,0 +1,202 @@
+ """Stream events and callback handler for TUI integration."""
+
+ from __future__ import annotations
+
+ import json
+ from collections.abc import Awaitable, Callable
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import Any
+
+
+ class StreamEventType(Enum):
+     TOKEN = "token"
+     THINKING = "thinking"
+     TOOL_CALL_START = "tool_call_start"
+     TOOL_RESULT = "tool_result"
+     ERROR = "error"
+     DONE = "done"
+
+
+ @dataclass
+ class StreamEvent:
+     """A single event from the agent stream."""
+
+     type: StreamEventType
+     content: str = ""
+     metadata: dict = field(default_factory=dict)
+
+
+ StreamCallback = Callable[[StreamEvent], Awaitable[None]]
+
+
+ def _extract_reasoning_content(message: Any) -> str:
+     """Extract reasoning/thinking content from an AIMessageChunk.
+
+     Checks multiple fields where different providers store reasoning tokens:
+     - ``additional_kwargs["reasoning_content"]`` — OpenRouter, DeepSeek, XAI, Groq
+     - ``content_blocks`` with ``type="reasoning"`` — Anthropic, OpenAI
+     """
+     # Check additional_kwargs first (most common for OpenAI-compatible providers)
+     ak = getattr(message, "additional_kwargs", None)
+     if ak and isinstance(ak, dict) and "reasoning_content" in ak:
+         content = ak["reasoning_content"]
+         if content:
+             return content
+
+     # Check content_blocks for type="reasoning"
+     blocks = getattr(message, "content_blocks", None)
+     if blocks:
+         for block in blocks:
+             if isinstance(block, dict) and block.get("type") == "reasoning":
+                 text = block.get("reasoning", "")
+                 if text:
+                     return text
+             elif hasattr(block, "type") and block.type == "reasoning":
+                 text = getattr(block, "reasoning", "")
+                 if text:
+                     return text
+
+     return ""
+
+
+ class StreamCallbackHandler:
+     """Converts LangChain/LangGraph stream events to StreamEvents for TUI rendering.
+
+     Supports ``stream_mode="messages"`` (token-by-token) and
+     ``stream_mode="updates"`` (batched per node).
+     """
+
+     def __init__(
+         self,
+         *,
+         show_tool_calls: bool = True,
+         show_tool_results: bool = False,
+     ) -> None:
+         self.show_tool_calls = show_tool_calls
+         self.show_tool_results = show_tool_results
+         # Buffer tool calls during streaming; keyed by tool_call_id.
+         # Values accumulate raw args string from tool_call_chunks.
+         self._pending_tool_calls: dict[str, dict[str, str]] = {}
+         # OpenAI streaming only sends `id` on the first chunk per tool call.
+         # Subsequent chunks carry only `index` + partial `args`.
+         # This maps index → tool_call_id so we can associate them.
+         self._index_to_id: dict[int, str] = {}
+
+     def process_agent_event(self, event: Any) -> list[StreamEvent]:
+         """Convert a single LangGraph stream event to StreamEvents.
+
+         ``stream_mode="messages"`` yields tuples::
+
+             (AIMessageChunk(content="Hello"), metadata_dict)
+
+         ``stream_mode="updates"`` yields ``UpdatesStreamPart`` dicts::
+
+             {"agent": {"messages": [AIMessage(...)]}}
+         """
+         events: list[StreamEvent] = []
+
+         # Handle stream_mode="messages" — yields (message, metadata) tuples
+         if isinstance(event, tuple) and len(event) == 2:
+             message, _metadata = event
+             return self._process_message(message)
+
+         # Handle stream_mode="updates" — yields dicts like {"agent": {"messages": [...]}}
+         if isinstance(event, dict):
+             for _node_name, update in event.items():
+                 for msg in update.get("messages", []):
+                     events.extend(self._process_message(msg))
+             return events
+
+         return events
+
+     def _process_message(self, message: Any) -> list[StreamEvent]:
+         """Process a single message (AIMessageChunk or ToolMessage)."""
+         events: list[StreamEvent] = []
+         msg_type = type(message).__name__
+
+         if msg_type in ("AIMessageChunk", "AIMessage"):
+             # Reasoning/thinking tokens
+             reasoning = _extract_reasoning_content(message)
+             if reasoning:
+                 events.append(StreamEvent(
+                     type=StreamEventType.THINKING,
+                     content=reasoning,
+                 ))
+
+             # Tool calls — accumulate raw chunks, emit parsed call when ToolMessage arrives
+             tool_call_chunks = getattr(message, "tool_call_chunks", None)
+             if tool_call_chunks:
+                 for tc_chunk in tool_call_chunks:
+                     _get = tc_chunk.get if isinstance(tc_chunk, dict) else lambda k, d=None: getattr(tc_chunk, k, d)
+                     tc_id = _get("id") or ""
+                     tc_index = _get("index")
+
+                     # First chunk has id + index → record the mapping
+                     if tc_id and tc_index is not None:
+                         self._index_to_id[tc_index] = tc_id
+                     # Subsequent chunks only have index → look up the id
+                     elif not tc_id and tc_index is not None:
+                         tc_id = self._index_to_id.get(tc_index, "")
+
+                     if not tc_id:
+                         continue
+                     if tc_id not in self._pending_tool_calls:
+                         self._pending_tool_calls[tc_id] = {"name": "", "args_str": ""}
+                     entry = self._pending_tool_calls[tc_id]
+                     chunk_name = _get("name") or ""
+                     if chunk_name:
+                         entry["name"] = chunk_name
+                     chunk_args = _get("args") or ""
+                     if chunk_args:
+                         entry["args_str"] += chunk_args
+             elif getattr(message, "tool_calls", None):
+                 # Fallback for non-streaming AIMessage (e.g. stream_mode="updates")
+                 for tc in message.tool_calls:
+                     tc_id = tc.get("id", "")
+                     if not tc_id:
+                         continue
+                     if tc_id not in self._pending_tool_calls:
+                         self._pending_tool_calls[tc_id] = {"name": "", "args_str": ""}
+                     entry = self._pending_tool_calls[tc_id]
+                     if tc.get("name"):
+                         entry["name"] = tc["name"]
+                     if isinstance(tc.get("args"), dict):
+                         entry["args_str"] = json.dumps(tc["args"], ensure_ascii=False)
+
+             # Content tokens (partial chunks from streaming)
+             if message.content:
+                 events.append(StreamEvent(
+                     type=StreamEventType.TOKEN,
+                     content=message.content,
+                 ))
+
+         elif msg_type == "ToolMessage":
+             # Flush buffered tool call now that execution is complete
+             tc_id = getattr(message, "tool_call_id", "")
+             if tc_id and tc_id in self._pending_tool_calls and self.show_tool_calls:
+                 tc_info = self._pending_tool_calls.pop(tc_id)
+                 raw_args = tc_info.get("args_str", "")
+                 try:
+                     args = json.loads(raw_args) if raw_args else {}
+                 except json.JSONDecodeError:
+                     args = {}
+                 args_preview = json.dumps(args, ensure_ascii=False)
+                 events.append(StreamEvent(
+                     type=StreamEventType.TOOL_CALL_START,
+                     content=f"{tc_info['name']}({args_preview})",
+                 ))
+
+             if self.show_tool_results:
+                 events.append(StreamEvent(
+                     type=StreamEventType.TOOL_RESULT,
+                     content=message.content,
+                 ))
+             else:
+                 char_count = len(message.content)
+                 events.append(StreamEvent(
+                     type=StreamEventType.TOOL_RESULT,
+                     content=f"[{char_count:,} chars]",
+                 ))
+
+         return events
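
The handler's contract can be exercised directly. A sketch that feeds one hand-built `stream_mode="messages"` tuple; the chunk contents are invented for illustration:

```python
from langchain_core.messages import AIMessageChunk

from aipriceaction_terminal.agents import StreamCallbackHandler, StreamEventType

handler = StreamCallbackHandler(show_tool_calls=True, show_tool_results=False)

# stream_mode="messages" hands the handler (chunk, metadata) tuples
chunk = AIMessageChunk(
    content="Hello",
    additional_kwargs={"reasoning_content": "Weighing the options..."},
)
events = handler.process_agent_event((chunk, {}))

# Reasoning is surfaced before the visible token, per _process_message
assert [e.type for e in events] == [StreamEventType.THINKING, StreamEventType.TOKEN]
```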
@@ -0,0 +1,35 @@
+ """Agent configuration: reads from SDK settings."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+
+ from aipriceaction.settings import settings
+
+
+ @dataclass(frozen=True)
+ class AgentConfig:
+     """Configuration for an agent session.
+
+     Reads defaults from the SDK settings singleton (same .env the SDK uses).
+     """
+
+     api_key: str = field(default_factory=lambda: settings.openai_api_key)
+     base_url: str = field(default_factory=lambda: settings.openai_base_url)
+     model: str = field(default_factory=lambda: settings.openai_model)
+     lang: str = field(default_factory=lambda: settings.ai_context_lang)
+     max_retries: int = 3
+     base_retry_delay: float = 5.0
+     max_retry_delay: float = 60.0
+
+
+ TRANSIENT_ERROR_KEYWORDS: tuple[str, ...] = (
+     "429",
+     "500",
+     "502",
+     "503",
+     "504",
+     "timeout",
+     "connection",
+     "overloaded",
+ )
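
Because `AgentConfig` is frozen with factory defaults, overrides happen at construction time or via `dataclasses.replace`. A sketch, with an illustrative model id:

```python
from dataclasses import replace

from aipriceaction_terminal.agents import AgentConfig

# Fields not passed here fall back to the aipriceaction settings singleton
config = AgentConfig(model="openai/gpt-4o-mini", max_retries=5)

# frozen=True blocks attribute assignment, but replace() derives a variant
patient = replace(config, base_retry_delay=10.0, max_retry_delay=120.0)
```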