pygent 0.1.10__tar.gz → 0.1.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pygent-0.1.10 → pygent-0.1.11}/PKG-INFO +1 -1
- {pygent-0.1.10 → pygent-0.1.11}/pygent/__init__.py +2 -1
- {pygent-0.1.10 → pygent-0.1.11}/pygent/agent.py +10 -4
- pygent-0.1.11/pygent/errors.py +6 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent/models.py +11 -7
- {pygent-0.1.10 → pygent-0.1.11}/pygent/openai_compat.py +12 -3
- {pygent-0.1.10 → pygent-0.1.11}/pygent/runtime.py +28 -20
- {pygent-0.1.10 → pygent-0.1.11}/pygent/tools.py +10 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/PKG-INFO +1 -1
- {pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/SOURCES.txt +2 -0
- {pygent-0.1.10 → pygent-0.1.11}/pyproject.toml +2 -2
- {pygent-0.1.10 → pygent-0.1.11}/tests/test_autorun.py +13 -1
- {pygent-0.1.10 → pygent-0.1.11}/tests/test_custom_model.py +13 -1
- pygent-0.1.11/tests/test_error_handling.py +41 -0
- pygent-0.1.11/tests/test_runtime.py +41 -0
- {pygent-0.1.10 → pygent-0.1.11}/tests/test_tools.py +13 -1
- {pygent-0.1.10 → pygent-0.1.11}/tests/test_version.py +12 -1
- pygent-0.1.10/tests/test_runtime.py +0 -25
- {pygent-0.1.10 → pygent-0.1.11}/LICENSE +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/README.md +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent/__main__.py +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent/cli.py +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent/py.typed +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent/ui.py +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/dependency_links.txt +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/entry_points.txt +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/requires.txt +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/top_level.txt +0 -0
- {pygent-0.1.10 → pygent-0.1.11}/setup.cfg +0 -0

{pygent-0.1.10 → pygent-0.1.11}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pygent
-Version: 0.1.10
+Version: 0.1.11
 Summary: Pygent is a minimalist coding assistant that runs commands in a Docker container when available and falls back to local execution. See https://marianochaves.github.io/pygent for documentation and https://github.com/marianochaves/pygent for the source code.
 Author-email: Mariano Chaves <mchaves.software@gmail.com>
 Project-URL: Documentation, https://marianochaves.github.io/pygent

{pygent-0.1.10 → pygent-0.1.11}/pygent/__init__.py

@@ -8,5 +8,6 @@ except _metadata.PackageNotFoundError: # pragma: no cover - fallback for tests
 
 from .agent import Agent, run_interactive # noqa: E402,F401, must come after __version__
 from .models import Model, OpenAIModel # noqa: E402,F401
+from .errors import PygentError, APIError # noqa: E402,F401
 
-__all__ = ["Agent", "run_interactive", "Model", "OpenAIModel"]
+__all__ = ["Agent", "run_interactive", "Model", "OpenAIModel", "PygentError", "APIError"]
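
For context, the new error types are now re-exported at the package level, so callers can catch API failures without importing internal modules. A minimal usage sketch (illustrative only, not part of the release; it assumes `Agent()` is constructible with its default model):

```python
from pygent import Agent, APIError

agent = Agent()  # assumes the default OpenAI-backed model (illustrative)
try:
    # run_until_stop() repeats Agent.step() until the model calls `stop` or `continue`
    agent.run_until_stop("list the files in the workspace")  # example prompt
except APIError as exc:
    # Raised by OpenAIModel.chat / the bundled openai_compat client when the API call fails.
    print(f"API call failed: {exc}")
finally:
    agent.runtime.cleanup()
```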

{pygent-0.1.10 → pygent-0.1.11}/pygent/agent.py

@@ -10,6 +10,7 @@ from typing import Any, Dict, List
 
 from rich.console import Console
 from rich.panel import Panel
+from rich.markdown import Markdown
 
 from .runtime import Runtime
 from .tools import TOOL_SCHEMAS, execute_tool
@@ -19,6 +20,10 @@ DEFAULT_MODEL = os.getenv("PYGENT_MODEL", "gpt-4.1-mini")
 SYSTEM_MSG = (
     "You are Pygent, a sandboxed coding assistant.\n"
     "Respond with JSON when you need to use a tool."
+    "If you need to stop, call the `stop` tool.\n"
+    "You can use the following tools:\n"
+    f"{json.dumps(TOOL_SCHEMAS, indent=2)}\n"
+    "You can also use the `continue` tool to continue the conversation.\n"
 )
 
 console = Console()
@@ -46,7 +51,8 @@ class Agent:
             self.history.append({"role": "tool", "content": output, "tool_call_id": call.id})
             console.print(Panel(output, title=f"tool:{call.function.name}"))
         else:
-
+            markdown_response = Markdown(assistant_msg.content)
+            console.print(Panel(markdown_response, title="Resposta do Agente", title_align="left", border_style="cyan"))
         return assistant_msg
 
     def run_until_stop(self, user_msg: str, max_steps: int = 10) -> None:
@@ -56,7 +62,7 @@
        for _ in range(max_steps):
            assistant_msg = self.step(msg)
            calls = assistant_msg.tool_calls or []
-            if any(c.function.name
+            if any(c.function.name in ("stop", "continue") for c in calls):
                break
            msg = "continue"
 
@@ -66,9 +72,9 @@ def run_interactive(use_docker: bool | None = None) -> None: # pragma: no cover
    console.print("[bold green]Pygent[/] iniciado. (digite /exit para sair)")
    try:
        while True:
-            user_msg = console.input("[cyan]
+            user_msg = console.input("[cyan]user> [/]" )
            if user_msg.strip() in {"/exit", "quit", "q"}:
                break
-            agent.
+            agent.run_until_stop(user_msg)
    finally:
        agent.runtime.cleanup()

{pygent-0.1.10 → pygent-0.1.11}/pygent/models.py

@@ -10,6 +10,7 @@ except ModuleNotFoundError: # pragma: no cover - fallback to bundled client
    from . import openai_compat as openai
 
 from .openai_compat import Message
+from .errors import APIError
 
 
 class Model(Protocol):
@@ -24,10 +25,13 @@ class OpenAIModel:
    """Default model using the OpenAI-compatible API."""
 
    def chat(self, messages: List[Dict[str, Any]], model: str, tools: Any) -> Message:
-        resp = openai.chat.completions.create(
-            model=model,
-            messages=messages,
-            tools=tools,
-            tool_choice="auto",
-        )
-        return resp.choices[0].message
+        try:
+            resp = openai.chat.completions.create(
+                model=model,
+                messages=messages,
+                tools=tools,
+                tool_choice="auto",
+            )
+            return resp.choices[0].message
+        except Exception as exc:
+            raise APIError(str(exc)) from exc

{pygent-0.1.10 → pygent-0.1.11}/pygent/openai_compat.py

@@ -2,7 +2,9 @@ import os
 import json
 from dataclasses import dataclass
 from typing import Any, Dict, List
-from urllib import request
+from urllib import request, error
+
+from .errors import APIError
 
 OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
@@ -39,8 +41,15 @@ def _post(path: str, payload: Dict[str, Any]) -> Dict[str, Any]:
    if OPENAI_API_KEY:
        headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"
    req = request.Request(f"{OPENAI_BASE_URL}{path}", data=data, headers=headers)
-    with request.urlopen(req) as resp:
-        return json.loads(resp.read().decode())
+    try:
+        with request.urlopen(req) as resp:
+            return json.loads(resp.read().decode())
+    except error.HTTPError as exc: # pragma: no cover - network dependent
+        raise APIError(f"HTTP error {exc.code}: {exc.reason}") from exc
+    except error.URLError as exc: # pragma: no cover - network dependent
+        raise APIError(f"Connection error: {exc.reason}") from exc
+    except Exception as exc: # pragma: no cover - fallback
+        raise APIError(str(exc)) from exc
 
 
 class _ChatCompletions:

{pygent-0.1.10 → pygent-0.1.11}/pygent/runtime.py

@@ -54,29 +54,37 @@ class Runtime:
        caller can display what was run.
        """
        if self._use_docker and self.container is not None:
-
+            try:
+                res = self.container.exec_run(
+                    cmd,
+                    workdir="/workspace",
+                    demux=True,
+                    tty=False,
+                    stdin=False,
+                    timeout=timeout,
+                )
+                stdout, stderr = (
+                    res.output if isinstance(res.output, tuple) else (res.output, b"")
+                )
+                output = (stdout or b"").decode() + (stderr or b"").decode()
+                return f"$ {cmd}\n{output}"
+            except Exception as exc:
+                return f"$ {cmd}\n[error] {exc}"
+        try:
+            proc = subprocess.run(
                cmd,
-
-
-
-
+                shell=True,
+                cwd=self.base_dir,
+                capture_output=True,
+                text=True,
+                stdin=subprocess.DEVNULL,
                timeout=timeout,
            )
-            stdout
-
-
-
-            return f"$ {cmd}\n{
-        proc = subprocess.run(
-            cmd,
-            shell=True,
-            cwd=self.base_dir,
-            capture_output=True,
-            text=True,
-            stdin=subprocess.DEVNULL,
-            timeout=timeout,
-        )
-        return f"$ {cmd}\n{proc.stdout + proc.stderr}"
+            return f"$ {cmd}\n{proc.stdout + proc.stderr}"
+        except subprocess.TimeoutExpired:
+            return f"$ {cmd}\n[timeout after {timeout}s]"
+        except Exception as exc:
+            return f"$ {cmd}\n[error] {exc}"
 
    def write_file(self, path: Union[str, Path], content: str) -> str:
        p = self.base_dir / path
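
The reworked `Runtime.bash()` no longer raises on command failures: both the Docker branch and the local fallback echo the command and report problems inline (`[timeout after Ns]`, `[error] ...`). A small sketch of the local-fallback behaviour, mirroring the new tests (the expected outputs in the comments are approximate):

```python
from pygent.runtime import Runtime

rt = Runtime(use_docker=False)  # force the local subprocess fallback
try:
    print(rt.bash("echo hi"))             # -> "$ echo hi\nhi\n"
    print(rt.bash("sleep 5", timeout=0))  # -> "$ sleep 5\n[timeout after 0s]"
finally:
    rt.cleanup()
```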

{pygent-0.1.10 → pygent-0.1.11}/pygent/tools.py

@@ -43,6 +43,14 @@ TOOL_SCHEMAS = [
            "parameters": {"type": "object", "properties": {}},
        },
    },
+    {
+        "type": "function",
+        "function": {
+            "name": "continue",
+            "description": "Continue the conversation.",
+            "parameters": {"type": "object", "properties": {}},
+        },
+    },
 ]
 
 # --------------- dispatcher ---------------
@@ -57,4 +65,6 @@ def execute_tool(call: Any, rt: Runtime) -> str: # pragma: no cover, Any→open
        return rt.write_file(**args)
    if name == "stop":
        return "Stopping."
+    if name == "continue":
+        return "Continuing the conversation."
    return f"⚠️ unknown tool {name}"
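
For context, the dispatcher can be exercised the way the test suite does it, by passing a minimal object shaped like an OpenAI tool call. A sketch (illustrative only; it assumes `execute_tool` reads the tool name and JSON arguments from `call.function`, which is what the `write_file` branch implies):

```python
import json
import types

from pygent import tools  # dispatcher with the new "continue" branch


class DummyRuntime:
    """No bash/write_file needed; the stop and continue branches ignore the runtime."""


# Fake tool call in the OpenAI shape (hypothetical helper, not part of the package).
call = types.SimpleNamespace(
    function=types.SimpleNamespace(name="continue", arguments=json.dumps({}))
)
print(tools.execute_tool(call, DummyRuntime()))  # -> "Continuing the conversation."
```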

{pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pygent
-Version: 0.1.10
+Version: 0.1.11
 Summary: Pygent is a minimalist coding assistant that runs commands in a Docker container when available and falls back to local execution. See https://marianochaves.github.io/pygent for documentation and https://github.com/marianochaves/pygent for the source code.
 Author-email: Mariano Chaves <mchaves.software@gmail.com>
 Project-URL: Documentation, https://marianochaves.github.io/pygent

{pygent-0.1.10 → pygent-0.1.11}/pygent.egg-info/SOURCES.txt

@@ -5,6 +5,7 @@ pygent/__init__.py
 pygent/__main__.py
 pygent/agent.py
 pygent/cli.py
+pygent/errors.py
 pygent/models.py
 pygent/openai_compat.py
 pygent/py.typed
@@ -19,6 +20,7 @@ pygent.egg-info/requires.txt
 pygent.egg-info/top_level.txt
 tests/test_autorun.py
 tests/test_custom_model.py
+tests/test_error_handling.py
 tests/test_runtime.py
 tests/test_tools.py
 tests/test_version.py

{pygent-0.1.10 → pygent-0.1.11}/pyproject.toml

@@ -1,11 +1,11 @@
 [project]
 name = "pygent"
-version = "0.1.10"
+version = "0.1.11"
 description = "Pygent is a minimalist coding assistant that runs commands in a Docker container when available and falls back to local execution. See https://marianochaves.github.io/pygent for documentation and https://github.com/marianochaves/pygent for the source code."
 authors = [ { name = "Mariano Chaves", email = "mchaves.software@gmail.com" } ]
 requires-python = ">=3.9"
 dependencies = [
-    "rich>=13.7.0",
+    "rich>=13.7.0",
     "openai>=1.0.0",
 ]
 

{pygent-0.1.10 → pygent-0.1.11}/tests/test_autorun.py

@@ -4,14 +4,25 @@ import types
 
 sys.modules.setdefault('openai', types.ModuleType('openai'))
 sys.modules.setdefault('docker', types.ModuleType('docker'))
+
+# --- Início da correção ---
 rich_mod = types.ModuleType('rich')
 console_mod = types.ModuleType('console')
-console_mod.Console = lambda *a, **k: type('C', (), {'print': lambda *a, **k: None})()
 panel_mod = types.ModuleType('panel')
+markdown_mod = types.ModuleType('markdown') # Novo mock para rich.markdown
+syntax_mod = types.ModuleType('syntax') # Novo mock para rich.syntax
+
+console_mod.Console = lambda *a, **k: type('C', (), {'print': lambda *a, **k: None})()
 panel_mod.Panel = lambda *a, **k: None
+markdown_mod.Markdown = lambda *a, **k: None # Mock para rich.markdown.Markdown
+syntax_mod.Syntax = lambda *a, **k: None # Mock para rich.syntax.Syntax
+
 sys.modules.setdefault('rich', rich_mod)
 sys.modules.setdefault('rich.console', console_mod)
 sys.modules.setdefault('rich.panel', panel_mod)
+sys.modules.setdefault('rich.markdown', markdown_mod) # Adicionado
+sys.modules.setdefault('rich.syntax', syntax_mod) # Adicionado
+# --- Fim da correção ---
 
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 
@@ -67,3 +78,4 @@ def test_run_until_stop():
        for msg in ag.history
        if hasattr(msg, 'tool_calls') and msg.tool_calls
        for call in msg.tool_calls)
+

{pygent-0.1.10 → pygent-0.1.11}/tests/test_custom_model.py

@@ -4,14 +4,25 @@ import types
 
 sys.modules.setdefault('openai', types.ModuleType('openai'))
 sys.modules.setdefault('docker', types.ModuleType('docker'))
+
+# --- Início da correção ---
 rich_mod = types.ModuleType('rich')
 console_mod = types.ModuleType('console')
-console_mod.Console = lambda *a, **k: type('C', (), {'print': lambda *a, **k: None})()
 panel_mod = types.ModuleType('panel')
+markdown_mod = types.ModuleType('markdown') # Novo mock para rich.markdown
+syntax_mod = types.ModuleType('syntax') # Novo mock para rich.syntax
+
+console_mod.Console = lambda *a, **k: type('C', (), {'print': lambda *a, **k: None})()
 panel_mod.Panel = lambda *a, **k: None
+markdown_mod.Markdown = lambda *a, **k: None # Mock para rich.markdown.Markdown
+syntax_mod.Syntax = lambda *a, **k: None # Mock para rich.syntax.Syntax
+
 sys.modules.setdefault('rich', rich_mod)
 sys.modules.setdefault('rich.console', console_mod)
 sys.modules.setdefault('rich.panel', panel_mod)
+sys.modules.setdefault('rich.markdown', markdown_mod) # Adicionado
+sys.modules.setdefault('rich.syntax', syntax_mod) # Adicionado
+# --- Fim da correção ---
 
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 
@@ -26,3 +37,4 @@ def test_custom_model():
    ag = Agent(model=DummyModel())
    ag.step('hi')
    assert ag.history[-1].content == 'ok'
+

pygent-0.1.11/tests/test_error_handling.py

@@ -0,0 +1,41 @@
+import os
+import sys
+import types
+import pytest
+
+# Stub external dependencies
+sys.modules.setdefault('docker', types.ModuleType('docker'))
+rich_mod = types.ModuleType('rich')
+console_mod = types.ModuleType('console')
+console_mod.Console = lambda *a, **k: None
+panel_mod = types.ModuleType('panel')
+panel_mod.Panel = lambda *a, **k: None
+sys.modules.setdefault('rich', rich_mod)
+sys.modules.setdefault('rich.console', console_mod)
+sys.modules.setdefault('rich.panel', panel_mod)
+
+
+def test_openai_model_error():
+    openai_mod = types.ModuleType('openai')
+    class ChatComp:
+        def create(*a, **k):
+            raise RuntimeError('boom')
+    chat_mod = types.ModuleType('chat')
+    chat_mod.completions = ChatComp()
+    openai_mod.chat = chat_mod
+    sys.modules['openai'] = openai_mod
+
+    from pygent.models import OpenAIModel
+    from pygent.errors import APIError
+
+    model = OpenAIModel()
+    with pytest.raises(APIError):
+        model.chat([], 'gpt', None)
+
+
+def test_bash_timeout():
+    from pygent.runtime import Runtime
+    rt = Runtime(use_docker=False)
+    out = rt.bash('sleep 5', timeout=0)
+    rt.cleanup()
+    assert '[timeout' in out

pygent-0.1.11/tests/test_runtime.py

@@ -0,0 +1,41 @@
+import os
+import sys
+import types
+
+# Stub external dependencies
+sys.modules.setdefault('openai', types.ModuleType('openai'))
+sys.modules.setdefault('docker', types.ModuleType('docker'))
+
+# --- Início da correção ---
+# Criação de módulos mock para rich e seus submódulos
+rich_mod = types.ModuleType('rich')
+console_mod = types.ModuleType('console')
+panel_mod = types.ModuleType('panel')
+markdown_mod = types.ModuleType('markdown') # Novo mock para rich.markdown
+syntax_mod = types.ModuleType('syntax') # Novo mock para rich.syntax
+
+# Mocks para as classes e funções usadas de rich
+console_mod.Console = lambda *a, **k: type('C', (), {'print': lambda *a, **k: None})()
+panel_mod.Panel = lambda *a, **k: None
+markdown_mod.Markdown = lambda *a, **k: None # Mock para rich.markdown.Markdown
+syntax_mod.Syntax = lambda *a, **k: None # Mock para rich.syntax.Syntax
+
+# Definindo os módulos mock no sys.modules
+sys.modules.setdefault('rich', rich_mod)
+sys.modules.setdefault('rich.console', console_mod)
+sys.modules.setdefault('rich.panel', panel_mod)
+sys.modules.setdefault('rich.markdown', markdown_mod) # Adicionado
+sys.modules.setdefault('rich.syntax', syntax_mod) # Adicionado
+# --- Fim da correção ---
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from pygent.runtime import Runtime
+
+
+def test_bash_includes_command():
+    rt = Runtime(use_docker=False)
+    out = rt.bash('echo hi')
+    rt.cleanup()
+    assert out.startswith('$ echo hi\n')
+

{pygent-0.1.10 → pygent-0.1.11}/tests/test_tools.py

@@ -4,14 +4,25 @@ import types
 
 sys.modules.setdefault('openai', types.ModuleType('openai'))
 sys.modules.setdefault('docker', types.ModuleType('docker'))
+
+# --- Início da correção ---
 rich_mod = types.ModuleType('rich')
 console_mod = types.ModuleType('console')
-console_mod.Console = lambda *a, **k: None
 panel_mod = types.ModuleType('panel')
+markdown_mod = types.ModuleType('markdown') # Novo mock para rich.markdown
+syntax_mod = types.ModuleType('syntax') # Novo mock para rich.syntax
+
+console_mod.Console = lambda *a, **k: None
 panel_mod.Panel = lambda *a, **k: None
+markdown_mod.Markdown = lambda *a, **k: None # Mock para rich.markdown.Markdown
+syntax_mod.Syntax = lambda *a, **k: None # Mock para rich.syntax.Syntax
+
 sys.modules.setdefault('rich', rich_mod)
 sys.modules.setdefault('rich.console', console_mod)
 sys.modules.setdefault('rich.panel', panel_mod)
+sys.modules.setdefault('rich.markdown', markdown_mod) # Adicionado
+sys.modules.setdefault('rich.syntax', syntax_mod) # Adicionado
+# --- Fim da correção ---
 
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
 
@@ -41,3 +52,4 @@ def test_execute_write_file():
        })
    })()
    assert tools.execute_tool(call, DummyRuntime()) == 'wrote foo.txt'
+

{pygent-0.1.10 → pygent-0.1.11}/tests/test_version.py

@@ -5,15 +5,26 @@ import types
 # Stub external dependencies so the package can be imported without network
 sys.modules.setdefault('openai', types.ModuleType('openai'))
 sys.modules.setdefault('docker', types.ModuleType('docker'))
+
+# --- Início da correção ---
 rich_mod = types.ModuleType('rich')
 console_mod = types.ModuleType('console')
-console_mod.Console = lambda *a, **k: None
 panel_mod = types.ModuleType('panel')
+markdown_mod = types.ModuleType('markdown') # Novo mock para rich.markdown
+syntax_mod = types.ModuleType('syntax') # Novo mock para rich.syntax
+
+console_mod.Console = lambda *a, **k: None
 panel_mod.Panel = lambda *a, **k: None
+markdown_mod.Markdown = lambda *a, **k: None # Mock para rich.markdown.Markdown
+syntax_mod.Syntax = lambda *a, **k: None # Mock para rich.syntax.Syntax
+
 sys.modules.setdefault('rich', rich_mod)
 sys.modules.setdefault('rich.console', console_mod)
 sys.modules.setdefault('rich.panel', panel_mod)
+sys.modules.setdefault('rich.markdown', markdown_mod)
+sys.modules.setdefault('rich.syntax', syntax_mod)
 
 def test_version_string():
    pkg = importlib.import_module('pygent')
    assert isinstance(pkg.__version__, str)
+

pygent-0.1.10/tests/test_runtime.py

@@ -1,25 +0,0 @@
-import os
-import sys
-import types
-
-sys.modules.setdefault('openai', types.ModuleType('openai'))
-sys.modules.setdefault('docker', types.ModuleType('docker'))
-rich_mod = types.ModuleType('rich')
-console_mod = types.ModuleType('console')
-console_mod.Console = lambda *a, **k: None
-panel_mod = types.ModuleType('panel')
-panel_mod.Panel = lambda *a, **k: None
-sys.modules.setdefault('rich', rich_mod)
-sys.modules.setdefault('rich.console', console_mod)
-sys.modules.setdefault('rich.panel', panel_mod)
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from pygent.runtime import Runtime
-
-
-def test_bash_includes_command():
-    rt = Runtime(use_docker=False)
-    out = rt.bash('echo hi')
-    rt.cleanup()
-    assert out.startswith('$ echo hi\n')
|