bog-agents-cli 0.5.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bog_agents_cli/__init__.py +9 -0
- bog_agents_cli/__main__.py +6 -0
- bog_agents_cli/_debug.py +50 -0
- bog_agents_cli/_server_config.py +320 -0
- bog_agents_cli/_server_constants.py +4 -0
- bog_agents_cli/_testing_models.py +144 -0
- bog_agents_cli/_version.py +3 -0
- bog_agents_cli/agent.py +984 -0
- bog_agents_cli/app.py +3974 -0
- bog_agents_cli/app.tcss +233 -0
- bog_agents_cli/ask_user.py +376 -0
- bog_agents_cli/background_agents.py +310 -0
- bog_agents_cli/browser_cli.py +128 -0
- bog_agents_cli/built_in_skills/__init__.py +5 -0
- bog_agents_cli/built_in_skills/skill-creator/SKILL.md +399 -0
- bog_agents_cli/built_in_skills/skill-creator/scripts/init_skill.py +366 -0
- bog_agents_cli/built_in_skills/skill-creator/scripts/quick_validate.py +158 -0
- bog_agents_cli/clipboard.py +128 -0
- bog_agents_cli/code_intelligence_cli.py +185 -0
- bog_agents_cli/compact_selective.py +200 -0
- bog_agents_cli/config.py +1834 -0
- bog_agents_cli/configurable_model.py +133 -0
- bog_agents_cli/dashboard.py +340 -0
- bog_agents_cli/default_agent_prompt.md +12 -0
- bog_agents_cli/doctor.py +181 -0
- bog_agents_cli/enterprise_cli.py +149 -0
- bog_agents_cli/extensions.py +355 -0
- bog_agents_cli/file_ops.py +473 -0
- bog_agents_cli/hooks.py +287 -0
- bog_agents_cli/image_cli.py +101 -0
- bog_agents_cli/input.py +778 -0
- bog_agents_cli/input_shortcuts.py +93 -0
- bog_agents_cli/integrations/__init__.py +1 -0
- bog_agents_cli/integrations/daytona.py +224 -0
- bog_agents_cli/integrations/langsmith.py +294 -0
- bog_agents_cli/integrations/modal.py +228 -0
- bog_agents_cli/integrations/runloop.py +226 -0
- bog_agents_cli/integrations/sandbox_factory.py +194 -0
- bog_agents_cli/integrations/sandbox_provider.py +71 -0
- bog_agents_cli/json_output.py +181 -0
- bog_agents_cli/keybindings.py +109 -0
- bog_agents_cli/local_context.py +586 -0
- bog_agents_cli/main.py +1657 -0
- bog_agents_cli/mcp_tools.py +625 -0
- bog_agents_cli/mcp_trust.py +168 -0
- bog_agents_cli/media_utils.py +478 -0
- bog_agents_cli/model_config.py +1486 -0
- bog_agents_cli/multi_agent.py +113 -0
- bog_agents_cli/multi_model_cli.py +145 -0
- bog_agents_cli/non_interactive.py +874 -0
- bog_agents_cli/oauth_mcp.py +290 -0
- bog_agents_cli/output.py +69 -0
- bog_agents_cli/plugin_marketplace.py +192 -0
- bog_agents_cli/pr_cli.py +110 -0
- bog_agents_cli/pr_output.py +454 -0
- bog_agents_cli/profiles.py +227 -0
- bog_agents_cli/project_utils.py +188 -0
- bog_agents_cli/py.typed +0 -0
- bog_agents_cli/recommend.py +390 -0
- bog_agents_cli/remote.py +235 -0
- bog_agents_cli/remote_client.py +515 -0
- bog_agents_cli/replay.py +335 -0
- bog_agents_cli/review_command.py +131 -0
- bog_agents_cli/server.py +513 -0
- bog_agents_cli/server_graph.py +190 -0
- bog_agents_cli/server_manager.py +351 -0
- bog_agents_cli/session_fork.py +175 -0
- bog_agents_cli/session_manager.py +200 -0
- bog_agents_cli/sessions.py +1217 -0
- bog_agents_cli/skills/__init__.py +18 -0
- bog_agents_cli/skills/commands.py +1013 -0
- bog_agents_cli/skills/load.py +194 -0
- bog_agents_cli/smart_context_cli.py +95 -0
- bog_agents_cli/streaming_diff.py +124 -0
- bog_agents_cli/subagents.py +173 -0
- bog_agents_cli/system_prompt.md +238 -0
- bog_agents_cli/teach.py +242 -0
- bog_agents_cli/test_tools_cli.py +104 -0
- bog_agents_cli/textual_adapter.py +1401 -0
- bog_agents_cli/tool_display.py +306 -0
- bog_agents_cli/tools.py +236 -0
- bog_agents_cli/ui.py +388 -0
- bog_agents_cli/unicode_security.py +516 -0
- bog_agents_cli/update_check.py +102 -0
- bog_agents_cli/web_search.py +178 -0
- bog_agents_cli/widgets/__init__.py +9 -0
- bog_agents_cli/widgets/_links.py +62 -0
- bog_agents_cli/widgets/approval.py +423 -0
- bog_agents_cli/widgets/ask_user.py +399 -0
- bog_agents_cli/widgets/autocomplete.py +745 -0
- bog_agents_cli/widgets/chat_input.py +1689 -0
- bog_agents_cli/widgets/diff.py +216 -0
- bog_agents_cli/widgets/history.py +172 -0
- bog_agents_cli/widgets/loading.py +173 -0
- bog_agents_cli/widgets/mcp_viewer.py +348 -0
- bog_agents_cli/widgets/message_store.py +623 -0
- bog_agents_cli/widgets/messages.py +1395 -0
- bog_agents_cli/widgets/model_selector.py +845 -0
- bog_agents_cli/widgets/status.py +375 -0
- bog_agents_cli/widgets/thread_selector.py +1811 -0
- bog_agents_cli/widgets/tool_renderers.py +130 -0
- bog_agents_cli/widgets/tool_widgets.py +245 -0
- bog_agents_cli/widgets/welcome.py +237 -0
- bog_agents_cli-0.5.2.data/data/bog_agents_cli/default_agent_prompt.md +12 -0
- bog_agents_cli-0.5.2.dist-info/METADATA +270 -0
- bog_agents_cli-0.5.2.dist-info/RECORD +108 -0
- bog_agents_cli-0.5.2.dist-info/WHEEL +4 -0
- bog_agents_cli-0.5.2.dist-info/entry_points.txt +3 -0
bog_agents_cli/_debug.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"""Shared debug-logging configuration for verbose file-based tracing.
|
|
2
|
+
|
|
3
|
+
When the `BOG_AGENTS_DEBUG` environment variable is set, modules that handle
|
|
4
|
+
streaming or remote communication can enable detailed file-based logging. This
|
|
5
|
+
helper centralizes the setup so the env-var name, file path, and format are
|
|
6
|
+
defined in one place.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import logging
|
|
12
|
+
import os
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def configure_debug_logging(target: logging.Logger) -> None:
    """Attach a file handler to *target* when `BOG_AGENTS_DEBUG` is set.

    The log file defaults to `'/tmp/bog_agents_debug.log'` but can be
    overridden with `BOG_AGENTS_DEBUG_FILE`. The handler opens the file in
    append mode so several modules can share one log file over a session.

    Does nothing when `BOG_AGENTS_DEBUG` is not set.

    Args:
        target: Logger to configure.
    """
    if not os.environ.get("BOG_AGENTS_DEBUG"):
        return

    raw_location = os.environ.get(
        "BOG_AGENTS_DEBUG_FILE",
        "/tmp/bog_agents_debug.log",  # noqa: S108
    )
    debug_path = Path(raw_location)
    try:
        file_handler = logging.FileHandler(str(debug_path), mode="a")
    except OSError as exc:
        import sys

        # Best effort: an unwritable debug-log path must never crash the CLI.
        print(  # noqa: T201
            f"Warning: could not open debug log file {debug_path}: {exc}",
            file=sys.stderr,
        )
        return
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(message)s"))
    target.addHandler(file_handler)
    target.setLevel(logging.DEBUG)
|
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
"""Typed configuration for the CLI-to-server subprocess communication channel.
|
|
2
|
+
|
|
3
|
+
The CLI spawns a `langgraph dev` subprocess and passes configuration via
|
|
4
|
+
environment variables prefixed with `DA_SERVER_`. This module provides a single
|
|
5
|
+
`ServerConfig` dataclass that both sides share so that the set of variables,
|
|
6
|
+
their serialization format, and their default values are defined in one place.
|
|
7
|
+
The CLI writes config with `to_env()` and the server graph reads it back
|
|
8
|
+
with `from_env()`.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import json
|
|
14
|
+
import logging
|
|
15
|
+
import os
|
|
16
|
+
from dataclasses import dataclass
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import TYPE_CHECKING, Any
|
|
19
|
+
|
|
20
|
+
from bog_agents_cli._server_constants import ENV_PREFIX as _ENV_PREFIX
|
|
21
|
+
|
|
22
|
+
if TYPE_CHECKING:
|
|
23
|
+
from bog_agents_cli.project_utils import ProjectContext
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
_DEFAULT_ASSISTANT_ID = "agent"
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _read_env_bool(suffix: str, *, default: bool = False) -> bool:
    """Read a `DA_SERVER_*` boolean from the environment.

    Boolean variables follow the case-insensitive `'true'` / `'false'`
    convention; any value other than `'true'` counts as false. A missing
    variable yields *default*.

    Args:
        suffix: Variable name suffix after the `DA_SERVER_` prefix.
        default: Value when the variable is absent.

    Returns:
        Parsed boolean.
    """
    value = os.environ.get(f"{_ENV_PREFIX}{suffix}")
    return default if value is None else value.lower() == "true"
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _read_env_json(suffix: str) -> Any:  # noqa: ANN401
    """Read a JSON-encoded `DA_SERVER_*` variable.

    Args:
        suffix: Variable name suffix after the `DA_SERVER_` prefix.

    Returns:
        Parsed JSON value, or `None` if the variable is absent.

    Raises:
        ValueError: If the variable is present but not valid JSON.
    """
    encoded = os.environ.get(f"{_ENV_PREFIX}{suffix}")
    if encoded is None:
        return None
    try:
        return json.loads(encoded)
    except json.JSONDecodeError as exc:
        # Truncate the offending payload so the error message stays readable.
        msg = (
            f"Failed to parse {_ENV_PREFIX}{suffix} as JSON: {exc}. "
            f"Value was: {encoded[:200]!r}"
        )
        raise ValueError(msg) from exc
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _read_env_str(suffix: str) -> str | None:
    """Read an optional `DA_SERVER_*` string variable.

    Args:
        suffix: Variable name suffix after the `DA_SERVER_` prefix.

    Returns:
        The string value, or `None` if absent.
    """
    key = f"{_ENV_PREFIX}{suffix}"
    return os.environ.get(key)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _read_env_optional_bool(suffix: str) -> bool | None:
    """Read a tri-state `DA_SERVER_*` boolean (`True` / `False` / `None`).

    Useful for settings where `None` means "not specified, fall back to
    default logic" rather than an explicit user choice.

    Args:
        suffix: Variable name suffix after the `DA_SERVER_` prefix.

    Returns:
        `True`, `False`, or `None` when the variable is absent.
    """
    value = os.environ.get(f"{_ENV_PREFIX}{suffix}")
    return None if value is None else value.lower() == "true"
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
@dataclass(frozen=True)
class ServerConfig:
    """Full configuration payload passed from the CLI to the server subprocess.

    The CLI and the `langgraph dev` subprocess run in separate interpreters,
    so this config travels through `DA_SERVER_*` environment variables: the
    CLI writes it with `to_env()` and the server graph recovers it with
    `from_env()`.
    """

    model: str | None = None
    model_params: dict[str, Any] | None = None
    assistant_id: str = _DEFAULT_ASSISTANT_ID
    system_prompt: str | None = None
    auto_approve: bool = False
    interactive: bool = True
    enable_shell: bool = True
    enable_ask_user: bool = False
    enable_memory: bool = True
    enable_skills: bool = True
    sandbox_type: str | None = None
    sandbox_id: str | None = None
    sandbox_setup: str | None = None
    cwd: str | None = None
    project_root: str | None = None
    mcp_config_path: str | None = None
    no_mcp: bool = False
    trust_project_mcp: bool | None = None

    def __post_init__(self) -> None:
        """Normalize fields that have canonical representations."""
        # "none" is the CLI spelling for "no sandbox"; store it as None.
        if self.sandbox_type == "none":
            object.__setattr__(self, "sandbox_type", None)

    # ------------------------------------------------------------------
    # Serialization
    # ------------------------------------------------------------------

    def to_env(self) -> dict[str, str | None]:
        """Serialize this config to a `DA_SERVER_*` env-var mapping.

        A `None` value means "clear this variable from the environment"
        (as opposed to setting it to an empty string), so callers can walk
        the mapping and set or delete each entry in `os.environ`.

        Returns:
            Dict mapping env-var suffixes (without the prefix) to their
            string values or `None`.
        """

        def flag(value: bool) -> str:
            # Booleans serialize as lowercase 'true' / 'false'.
            return str(value).lower()

        params_json: str | None = None
        if self.model_params is not None:
            params_json = json.dumps(self.model_params)

        trust: str | None = None
        if self.trust_project_mcp is not None:
            trust = flag(self.trust_project_mcp)

        return {
            "MODEL": self.model,
            "MODEL_PARAMS": params_json,
            "ASSISTANT_ID": self.assistant_id,
            "SYSTEM_PROMPT": self.system_prompt,
            "AUTO_APPROVE": flag(self.auto_approve),
            "INTERACTIVE": flag(self.interactive),
            "ENABLE_SHELL": flag(self.enable_shell),
            "ENABLE_ASK_USER": flag(self.enable_ask_user),
            "ENABLE_MEMORY": flag(self.enable_memory),
            "ENABLE_SKILLS": flag(self.enable_skills),
            "SANDBOX_TYPE": self.sandbox_type,
            "SANDBOX_ID": self.sandbox_id,
            "SANDBOX_SETUP": self.sandbox_setup,
            "CWD": self.cwd,
            "PROJECT_ROOT": self.project_root,
            "MCP_CONFIG_PATH": self.mcp_config_path,
            "NO_MCP": flag(self.no_mcp),
            "TRUST_PROJECT_MCP": trust,
        }

    @classmethod
    def from_env(cls) -> ServerConfig:
        """Reconstruct a `ServerConfig` from the current `DA_SERVER_*` env vars.

        Inverse of `to_env()`; called inside the server subprocess to recover
        the CLI's configuration.

        Returns:
            A `ServerConfig` populated from the environment.
        """
        assistant = _read_env_str("ASSISTANT_ID") or _DEFAULT_ASSISTANT_ID
        return cls(
            model=_read_env_str("MODEL"),
            model_params=_read_env_json("MODEL_PARAMS"),
            assistant_id=assistant,
            system_prompt=_read_env_str("SYSTEM_PROMPT"),
            auto_approve=_read_env_bool("AUTO_APPROVE"),
            interactive=_read_env_bool("INTERACTIVE", default=True),
            enable_shell=_read_env_bool("ENABLE_SHELL", default=True),
            enable_ask_user=_read_env_bool("ENABLE_ASK_USER"),
            enable_memory=_read_env_bool("ENABLE_MEMORY", default=True),
            enable_skills=_read_env_bool("ENABLE_SKILLS", default=True),
            sandbox_type=_read_env_str("SANDBOX_TYPE"),
            sandbox_id=_read_env_str("SANDBOX_ID"),
            sandbox_setup=_read_env_str("SANDBOX_SETUP"),
            cwd=_read_env_str("CWD"),
            project_root=_read_env_str("PROJECT_ROOT"),
            mcp_config_path=_read_env_str("MCP_CONFIG_PATH"),
            no_mcp=_read_env_bool("NO_MCP"),
            trust_project_mcp=_read_env_optional_bool("TRUST_PROJECT_MCP"),
        )

    # ------------------------------------------------------------------
    # Factory
    # ------------------------------------------------------------------

    @classmethod
    def from_cli_args(
        cls,
        *,
        project_context: ProjectContext | None,
        model_name: str | None,
        model_params: dict[str, Any] | None,
        assistant_id: str,
        auto_approve: bool,
        sandbox_type: str,
        sandbox_id: str | None,
        sandbox_setup: str | None,
        enable_shell: bool,
        enable_ask_user: bool,
        mcp_config_path: str | None,
        no_mcp: bool,
        trust_project_mcp: bool | None,
        interactive: bool,
    ) -> ServerConfig:
        """Build a `ServerConfig` from parsed CLI arguments.

        Relative paths (MCP config, sandbox setup script) are resolved against
        the user's working directory here, so the serialized values are always
        absolute and unambiguous by the time the server subprocess reads them.

        Args:
            project_context: Explicit user/project path context.
            model_name: Model spec string.
            model_params: Extra model kwargs.
            assistant_id: Agent identifier.
            auto_approve: Auto-approve all tools.
            sandbox_type: Sandbox type.
            sandbox_id: Existing sandbox ID to reuse.
            sandbox_setup: Path to setup script for the sandbox.
            enable_shell: Enable shell execution tools.
            enable_ask_user: Enable ask_user tool.
            mcp_config_path: Path to MCP config.
            no_mcp: Disable MCP.
            trust_project_mcp: Trust project MCP servers.
            interactive: Whether the agent is interactive.

        Returns:
            A fully resolved `ServerConfig`.
        """
        # Resolve the MCP config path first so a bad path fails early with
        # the same error ordering callers already observe.
        normalized_mcp = _normalize_path(mcp_config_path, project_context, "MCP config")
        normalized_setup = _normalize_path(
            sandbox_setup, project_context, "sandbox setup"
        )

        user_cwd: str | None = None
        root: str | None = None
        if project_context is not None:
            user_cwd = str(project_context.user_cwd)
            if project_context.project_root is not None:
                root = str(project_context.project_root)

        return cls(
            model=model_name,
            model_params=model_params,
            assistant_id=assistant_id,
            auto_approve=auto_approve,
            interactive=interactive,
            enable_shell=enable_shell,
            enable_ask_user=enable_ask_user,
            sandbox_type=sandbox_type,
            sandbox_id=sandbox_id,
            sandbox_setup=normalized_setup,
            cwd=user_cwd,
            project_root=root,
            mcp_config_path=normalized_mcp,
            no_mcp=no_mcp,
            trust_project_mcp=trust_project_mcp,
        )
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
def _normalize_path(
|
|
289
|
+
raw_path: str | None,
|
|
290
|
+
project_context: ProjectContext | None,
|
|
291
|
+
label: str,
|
|
292
|
+
) -> str | None:
|
|
293
|
+
"""Resolve a possibly-relative path to absolute.
|
|
294
|
+
|
|
295
|
+
The server subprocess runs in a different working directory, so relative
|
|
296
|
+
paths must be resolved against the user's original cwd before serialization.
|
|
297
|
+
|
|
298
|
+
Args:
|
|
299
|
+
raw_path: Path from CLI arguments (may be relative).
|
|
300
|
+
project_context: User/project context for path resolution.
|
|
301
|
+
label: Human-readable label for error messages (e.g. "MCP config").
|
|
302
|
+
|
|
303
|
+
Returns:
|
|
304
|
+
Absolute path string, or `None` when *raw_path* is `None` or empty.
|
|
305
|
+
|
|
306
|
+
Raises:
|
|
307
|
+
ValueError: If the path cannot be resolved.
|
|
308
|
+
"""
|
|
309
|
+
if not raw_path:
|
|
310
|
+
return None
|
|
311
|
+
try:
|
|
312
|
+
if project_context is not None:
|
|
313
|
+
return str(project_context.resolve_user_path(raw_path))
|
|
314
|
+
return str(Path(raw_path).expanduser().resolve())
|
|
315
|
+
except OSError as exc:
|
|
316
|
+
msg = (
|
|
317
|
+
f"Could not resolve {label} path {raw_path!r}: {exc}. "
|
|
318
|
+
"Ensure the path exists and is accessible."
|
|
319
|
+
)
|
|
320
|
+
raise ValueError(msg) from exc
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""Internal chat models used by local integration tests."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
|
|
7
|
+
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
|
|
8
|
+
from langchain_core.messages import AIMessage, BaseMessage
|
|
9
|
+
from langchain_core.outputs import ChatGeneration, ChatResult
|
|
10
|
+
from pydantic import Field
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from collections.abc import Callable, Sequence
|
|
14
|
+
|
|
15
|
+
from langchain_core.callbacks import CallbackManagerForLLMRun
|
|
16
|
+
from langchain_core.language_models import LanguageModelInput
|
|
17
|
+
from langchain_core.runnables import Runnable
|
|
18
|
+
from langchain_core.tools import BaseTool
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class DeterministicIntegrationChatModel(GenericFakeChatModel):
    """Prompt-driven deterministic chat model for CLI integration tests.

    Subclasses LangChain's `GenericFakeChatModel` to stay aligned with the
    core fake-chat-model surface, but overrides generation so output is a
    pure function of the prompt text — making it safe across server restarts
    in real CLI integration tests.

    Why the stock `langchain_core` fakes are not reusable here:

    1. Every core fake (`GenericFakeChatModel`, `FakeListChatModel`,
       `FakeMessagesListChatModel`) pops from an iterator or cycles an
       index and ignores the actual prompt. CLI integration tests start and
       stop the server process, resetting in-memory state, so an
       iterator-based model either raises `StopIteration` or replays from
       the start after a restart. This model derives its reply solely from
       the prompt, so identical input always yields identical output.

    2. The agent runtime calls `model.bind_tools(schemas)` during
       initialization; none of the core fakes implement `bind_tools`, which
       raises `AttributeError` in any agent-loop context. This model
       provides a no-op passthrough.

    3. The CLI server reads `model.profile` for capability negotiation
       (e.g. `tool_calling`, `max_input_tokens`); core fakes lack the
       attribute, causing `AttributeError` or silent misconfiguration.

    Finally, the compact middleware issues summarization prompts
    mid-conversation. A list-based fake cannot tell those apart from normal
    user turns without knowing the exact call order; this model recognizes
    them by inspecting the prompt content instead.
    """

    model: str = "fake"
    # `GenericFakeChatModel` requires `messages`; the `_generate` override
    # below never consumes it.
    messages: object = Field(default_factory=lambda: iter(()))
    profile: dict[str, Any] | None = Field(
        default_factory=lambda: {
            "tool_calling": True,
            "max_input_tokens": 8000,
        }
    )

    def bind_tools(
        self,
        tools: Sequence[dict[str, Any] | type | Callable | BaseTool],  # noqa: ARG002
        *,
        tool_choice: str | None = None,  # noqa: ARG002
        **kwargs: Any,  # noqa: ARG002
    ) -> Runnable[LanguageModelInput, AIMessage]:
        """Return self so the agent can bind tool schemas during tests."""
        return self

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,  # noqa: ARG002
        run_manager: CallbackManagerForLLMRun | None = None,  # noqa: ARG002
        **kwargs: Any,  # noqa: ARG002
    ) -> ChatResult:
        """Produce a deterministic reply derived from the prompt text.

        Returns:
            A single-message `ChatResult` with deterministic content.
        """
        texts = [self._stringify_message(msg) for msg in messages]
        prompt = "\n".join(text for text in texts if text.strip())
        if self._looks_like_summary_request(prompt):
            content = "integration summary"
        else:
            # Echo back the tail of the prompt so tests can match on input.
            tail_words = prompt.split()[-18:]
            if tail_words:
                content = f"integration reply: {' '.join(tail_words)}"
            else:
                content = "integration reply"

        reply = AIMessage(content=content)
        return ChatResult(generations=[ChatGeneration(message=reply)])

    @property
    def _llm_type(self) -> str:
        """Return the LangChain model type identifier."""
        return "deterministic-integration"

    @staticmethod
    def _stringify_message(message: BaseMessage) -> str:
        """Flatten message content into plain text for deterministic responses.

        Returns:
            Plain-text content extracted from the message.
        """
        content = message.content
        if isinstance(content, str):
            return content
        if not isinstance(content, list):
            return str(content)
        pieces: list[str] = []
        for block in content:
            if isinstance(block, str):
                pieces.append(block)
                continue
            if isinstance(block, dict) and block.get("type") == "text":
                text = block.get("text")
                if isinstance(text, str):
                    pieces.append(text)
        return " ".join(pieces)

    @staticmethod
    def _looks_like_summary_request(prompt: str) -> bool:
        """Detect the compact middleware's summary-generation prompt.

        Returns:
            `True` when the prompt appears to be a summarization request.
        """
        lowered = prompt.lower()
        markers = (
            "messages to summarize",
            "condense the following conversation",
            "<summary>",
        )
        return any(marker in lowered for marker in markers)
|