zrb 1.13.1__py3-none-any.whl → 1.21.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/__init__.py +2 -6
- zrb/attr/type.py +8 -8
- zrb/builtin/__init__.py +2 -0
- zrb/builtin/group.py +31 -15
- zrb/builtin/http.py +7 -8
- zrb/builtin/llm/attachment.py +40 -0
- zrb/builtin/llm/chat_session.py +130 -144
- zrb/builtin/llm/chat_session_cmd.py +226 -0
- zrb/builtin/llm/chat_trigger.py +73 -0
- zrb/builtin/llm/history.py +4 -4
- zrb/builtin/llm/llm_ask.py +218 -110
- zrb/builtin/llm/tool/api.py +74 -62
- zrb/builtin/llm/tool/cli.py +35 -16
- zrb/builtin/llm/tool/code.py +49 -47
- zrb/builtin/llm/tool/file.py +262 -251
- zrb/builtin/llm/tool/note.py +84 -0
- zrb/builtin/llm/tool/rag.py +25 -18
- zrb/builtin/llm/tool/sub_agent.py +29 -22
- zrb/builtin/llm/tool/web.py +135 -143
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
- zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
- zrb/builtin/searxng/config/settings.yml +5671 -0
- zrb/builtin/searxng/start.py +21 -0
- zrb/builtin/setup/latex/ubuntu.py +1 -0
- zrb/builtin/setup/ubuntu.py +1 -1
- zrb/builtin/shell/autocomplete/bash.py +4 -3
- zrb/builtin/shell/autocomplete/zsh.py +4 -3
- zrb/config/config.py +255 -78
- zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
- zrb/config/default_prompt/interactive_system_prompt.md +24 -30
- zrb/config/default_prompt/persona.md +1 -1
- zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
- zrb/config/default_prompt/summarization_prompt.md +8 -13
- zrb/config/default_prompt/system_prompt.md +36 -30
- zrb/config/llm_config.py +129 -24
- zrb/config/llm_context/config.py +127 -90
- zrb/config/llm_context/config_parser.py +1 -7
- zrb/config/llm_context/workflow.py +81 -0
- zrb/config/llm_rate_limitter.py +89 -45
- zrb/context/any_shared_context.py +7 -1
- zrb/context/context.py +8 -2
- zrb/context/shared_context.py +6 -8
- zrb/group/any_group.py +12 -5
- zrb/group/group.py +67 -3
- zrb/input/any_input.py +5 -1
- zrb/input/base_input.py +18 -6
- zrb/input/text_input.py +7 -24
- zrb/runner/cli.py +21 -20
- zrb/runner/common_util.py +24 -19
- zrb/runner/web_route/task_input_api_route.py +5 -5
- zrb/runner/web_route/task_session_api_route.py +1 -4
- zrb/runner/web_util/user.py +7 -3
- zrb/session/any_session.py +12 -6
- zrb/session/session.py +39 -18
- zrb/task/any_task.py +24 -3
- zrb/task/base/context.py +17 -9
- zrb/task/base/execution.py +15 -8
- zrb/task/base/lifecycle.py +8 -4
- zrb/task/base/monitoring.py +12 -7
- zrb/task/base_task.py +69 -5
- zrb/task/base_trigger.py +12 -5
- zrb/task/llm/agent.py +138 -52
- zrb/task/llm/config.py +45 -13
- zrb/task/llm/conversation_history.py +76 -6
- zrb/task/llm/conversation_history_model.py +0 -168
- zrb/task/llm/default_workflow/coding/workflow.md +41 -0
- zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
- zrb/task/llm/default_workflow/git/workflow.md +118 -0
- zrb/task/llm/default_workflow/golang/workflow.md +128 -0
- zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
- zrb/task/llm/default_workflow/java/workflow.md +146 -0
- zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
- zrb/task/llm/default_workflow/python/workflow.md +160 -0
- zrb/task/llm/default_workflow/researching/workflow.md +153 -0
- zrb/task/llm/default_workflow/rust/workflow.md +162 -0
- zrb/task/llm/default_workflow/shell/workflow.md +299 -0
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_summarization.py +22 -35
- zrb/task/llm/history_summarization_tool.py +24 -0
- zrb/task/llm/print_node.py +182 -63
- zrb/task/llm/prompt.py +213 -153
- zrb/task/llm/tool_wrapper.py +210 -53
- zrb/task/llm/workflow.py +76 -0
- zrb/task/llm_task.py +98 -47
- zrb/task/make_task.py +2 -3
- zrb/task/rsync_task.py +25 -10
- zrb/task/scheduler.py +4 -4
- zrb/util/attr.py +50 -40
- zrb/util/cli/markdown.py +12 -0
- zrb/util/cli/text.py +30 -0
- zrb/util/file.py +27 -11
- zrb/util/{llm/prompt.py → markdown.py} +2 -3
- zrb/util/string/conversion.py +1 -1
- zrb/util/truncate.py +23 -0
- zrb/util/yaml.py +204 -0
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/METADATA +40 -20
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/RECORD +102 -79
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/WHEEL +1 -1
- zrb/task/llm/default_workflow/coding.md +0 -24
- zrb/task/llm/default_workflow/copywriting.md +0 -17
- zrb/task/llm/default_workflow/researching.md +0 -18
- {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/note.py
ADDED
@@ -0,0 +1,84 @@
+import os
+
+from zrb.config.llm_context.config import llm_context_config
+
+
+def read_long_term_note() -> str:
+    """
+    Retrieves the GLOBAL long-term memory shared across ALL sessions and projects.
+
+    CRITICAL: Consult this first for user preferences, facts, and cross-project context.
+
+    Returns:
+        str: The current global note content.
+    """
+    contexts = llm_context_config.get_notes()
+    return contexts.get("/", "")
+
+
+def write_long_term_note(content: str) -> str:
+    """
+    Persists CRITICAL facts to the GLOBAL long-term memory.
+
+    USE EAGERLY to save:
+    - User preferences (e.g., "I prefer Python", "No unit tests").
+    - User information (e.g., user name, user email address).
+    - Important facts (e.g., "My API key is in .env").
+    - Cross-project goals.
+    - Anything that will be useful for future interaction across projects.
+
+    WARNING: This OVERWRITES the entire global note. Always read first.
+
+    Args:
+        content (str): The text to strictly memorize.
+
+    Returns:
+        str: Confirmation message.
+    """
+    llm_context_config.write_note(content, "/")
+    return "Global long-term note saved."
+
+
+def read_contextual_note(path: str | None = None) -> str:
+    """
+    Retrieves LOCAL memory specific to a file or directory path.
+
+    Use to recall project-specific architecture, code summaries, or past decisions
+    relevant to the current working location.
+
+    Args:
+        path (str | None): Target file/dir. Defaults to current working directory (CWD).
+
+    Returns:
+        str: The local note content for the path.
+    """
+    if path is None:
+        path = os.getcwd()
+    abs_path = os.path.abspath(path)
+    contexts = llm_context_config.get_notes(cwd=abs_path)
+    return contexts.get(abs_path, "")
+
+
+def write_contextual_note(content: str, path: str | None = None) -> str:
+    """
+    Persists LOCAL facts specific to a file or directory.
+
+    USE EAGERLY to save:
+    - Architectural patterns for this project/directory.
+    - Summaries of large files or directories.
+    - Specific guidelines for this project.
+    - Anything related to this directory that will be useful for future interaction.
+
+    WARNING: This OVERWRITES the note for the specific path. Always read first.
+
+    Args:
+        content (str): The text to memorize for this location.
+        path (str | None): Target file/dir. Defaults to CWD.
+
+    Returns:
+        str: Confirmation message.
+    """
+    if path is None:
+        path = os.getcwd()
+    llm_context_config.write_note(content, path)
+    return f"Contextual note saved for: {path}"
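For orientation, a usage sketch of the new note tools (not part of the diff; it assumes zrb 1.21.17 is installed and uses only the signatures shown in the hunk above):

import os

from zrb.builtin.llm.tool.note import (
    read_contextual_note,
    read_long_term_note,
    write_contextual_note,
    write_long_term_note,
)

# Both write helpers OVERWRITE their target note, so read-modify-write
# preserves earlier facts.
existing = read_long_term_note()
write_long_term_note(existing + "\n- User prefers Python.")

# Contextual notes are keyed by absolute path and default to the CWD.
write_contextual_note("Run tests with pytest.", path=os.getcwd())
print(read_contextual_note())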
zrb/builtin/llm/tool/rag.py
CHANGED
@@ -5,6 +5,7 @@ import os
 import sys
 from collections.abc import Callable
 from textwrap import dedent
+from typing import Any
 
 import ulid
 
@@ -44,35 +45,40 @@ def create_rag_from_directory(
     openai_embedding_model: str | None = None,
 ):
     """
-
+    Create a powerful RAG (Retrieval-Augmented Generation) tool for querying a local
+    knowledge base.
 
-    This factory function generates a tool that
+    This factory function generates a tool that performs semantic search over a directory of
+    documents. It automatically indexes the documents into a vector database (ChromaDB) and
+    keeps it updated as files change.
 
-    The
-
-    2. Automatically update a vector database (ChromaDB) with the latest content.
-    3. Accept a user query, embed it, and perform a similarity search against the document vectors.
-    4. Return the most relevant document chunks that match the query.
+    The generated tool is ideal for answering questions based on a specific set of documents,
+    such as project documentation or internal wikis.
 
     Args:
         tool_name (str): The name for the generated RAG tool (e.g., "search_project_docs").
-        tool_description (str): A clear description of what the
-
-
-
+        tool_description (str): A clear description of what the tool does and when to use it.
+            This is what the LLM will see.
+        document_dir_path (str, optional): The path to the directory containing the documents
+            to be indexed.
+        vector_db_path (str, optional): The path where the ChromaDB vector database will be
+            stored.
+        vector_db_collection (str, optional): The name of the collection within the vector
+            database.
         chunk_size (int, optional): The size of text chunks for embedding.
         overlap (int, optional): The overlap between text chunks.
         max_result_count (int, optional): The maximum number of search results to return.
-        file_reader (list[RAGFileReader], optional):
-
-
+        file_reader (list[RAGFileReader], optional): A list of custom file readers for
+            specific file types.
+        openai_api_key (str, optional): Your OpenAI API key for generating embeddings.
+        openai_base_url (str, optional): An optional base URL for the OpenAI API.
        openai_embedding_model (str, optional): The embedding model to use.
 
     Returns:
-
+        An asynchronous function that serves as the RAG tool.
     """
 
-    async def retrieve(query: str) -> str:
+    async def retrieve(query: str) -> dict[str, Any]:
         # Docstring will be set dynamically below
         from chromadb import PersistentClient
         from chromadb.config import Settings
@@ -187,7 +193,7 @@ def create_rag_from_directory(
             query_embeddings=query_vector,
             n_results=max_result_count_val,
         )
-        return
+        return dict(results)
 
     retrieve.__name__ = tool_name
     retrieve.__doc__ = dedent(
@@ -196,7 +202,8 @@ def create_rag_from_directory(
         Args:
             query (str): The user query to search for in documents.
         Returns:
-            str:
+            dict[str, Any]: dictionary with search results:
+            {{"ids": [...], "documents": [...], ...}}
         """
     ).strip()
     return retrieve
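A minimal calling sketch for the updated factory; the keyword names follow the docstring above, and the docs path is illustrative:

import asyncio

from zrb.builtin.llm.tool.rag import create_rag_from_directory

search_docs = create_rag_from_directory(
    tool_name="search_project_docs",
    tool_description="Semantic search over the ./docs knowledge base.",
    document_dir_path="./docs",  # illustrative path
)

# The generated tool is async and now returns a ChromaDB-style dict
# ({"ids": [...], "documents": [...], ...}) instead of a plain string.
results = asyncio.run(search_docs("How do I configure logging?"))
print(results["documents"])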
zrb/builtin/llm/tool/sub_agent.py
CHANGED
@@ -1,4 +1,3 @@
-import json
 from collections.abc import Callable
 from textwrap import dedent
 from typing import TYPE_CHECKING, Any, Coroutine
@@ -9,14 +8,12 @@ from zrb.task.llm.config import get_model, get_model_settings
 from zrb.task.llm.prompt import get_system_and_user_prompt
 
 if TYPE_CHECKING:
-    from pydantic_ai import
+    from pydantic_ai import Tool
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
     from pydantic_ai.toolsets import AbstractToolset
 
     ToolOrCallable = Tool | Callable
-else:
-    ToolOrCallable = Any
 
 
 def create_sub_agent_tool(
@@ -25,30 +22,38 @@ def create_sub_agent_tool(
     system_prompt: str | None = None,
     model: "str | Model | None" = None,
     model_settings: "ModelSettings | None" = None,
-    tools: list[ToolOrCallable] = [],
-    toolsets: list["AbstractToolset[
-
+    tools: "list[ToolOrCallable]" = [],
+    toolsets: list["AbstractToolset[None]"] = [],
+    yolo_mode: bool | list[str] | None = None,
+    log_indent_level: int = 2,
+) -> Callable[[AnyContext, str], Coroutine[Any, Any, dict[str, Any]]]:
     """
-
+    Create a tool that is another AI agent, capable of handling complex, multi-step sub-tasks.
 
-    This
+    This factory function generates a tool that, when used, spins up a temporary, specialized
+    AI agent. This "sub-agent" has its own system prompt, tools, and context, allowing it to
+    focus on accomplishing a specific task without being distracted by the main conversation.
 
     This is ideal for delegating complex tasks like analyzing a file or a repository.
 
     Args:
         tool_name (str): The name for the generated sub-agent tool.
-        tool_description (str): A clear description of the sub-agent's purpose and when to
-
+        tool_description (str): A clear description of the sub-agent's purpose and when to
+            use it. This is what the LLM will see.
+        system_prompt (str, optional): The system prompt that will guide the sub-agent's
+            behavior.
         model (str | Model, optional): The language model the sub-agent will use.
         model_settings (ModelSettings, optional): Specific settings for the sub-agent's model.
-        tools (list, optional): A list of tools that will be exclusively available to the
-
+        tools (list, optional): A list of tools that will be exclusively available to the
+            sub-agent.
+        toolsets (list, optional): A list of Toolsets for the sub-agent.
 
     Returns:
-
+        An asynchronous function that serves as the sub-agent tool. When called, it runs the
+        sub-agent with a given query and returns its final result.
     """
 
-    async def run_sub_agent(ctx: AnyContext, query: str) -> str:
+    async def run_sub_agent(ctx: AnyContext, query: str) -> dict[str, Any]:
         """
         Runs the sub-agent with the given query.
         """
@@ -86,6 +91,7 @@ def create_sub_agent_tool(
            model_settings=resolved_model_settings,
            tools=tools,
            toolsets=toolsets,
+           yolo_mode=yolo_mode,
        )
 
        sub_agent_run = None
@@ -95,16 +101,17 @@ def create_sub_agent_tool(
            ctx=ctx,
            agent=sub_agent_agent,
            user_prompt=query,
-
+           attachments=[],
+           history_list=[],
+           log_indent_level=log_indent_level,
        )
 
        # Return the sub-agent's final message content
        if sub_agent_run and sub_agent_run.result:
-           # Return the final message content
-           return
-
-
-       return "Sub-agent failed to produce a result."
+           # Return the final message content
+           return {"result": sub_agent_run.result.output}
+       ctx.log_warning("Sub-agent run did not produce a result.")
+       raise ValueError(f"{tool_name} not returning any result")
 
     # Set the name and docstring for the callable function
     run_sub_agent.__name__ = tool_name
@@ -116,7 +123,7 @@
            query (str): The query or task for the sub-agent.
 
        Returns:
-           str: The final response or result from the sub-agent.
+           dict[str, Any]: The final response or result from the sub-agent.
        """
    ).strip()
 
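A construction sketch for the updated factory; the analyst framing is illustrative, and the comment on yolo_mode reflects only what this diff shows (a new pass-through parameter):

from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool

analyze_repo = create_sub_agent_tool(
    tool_name="analyze_repo",
    tool_description="Delegates repository analysis to a focused sub-agent.",
    system_prompt="You are a code analyst. Answer concisely.",
    yolo_mode=True,  # new parameter in this release, forwarded to the sub-agent
)

# The generated coroutine is called as analyze_repo(ctx, query). It returns
# {"result": ...} on success and now raises ValueError instead of returning
# an error string when the sub-agent produces no result.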
zrb/builtin/llm/tool/web.py
CHANGED
@@ -1,179 +1,171 @@
-import json
 from collections.abc import Callable
+from typing import Any
+from urllib.parse import urljoin
 
+from zrb.config.config import CFG
+from zrb.config.llm_config import llm_config
 
-
-    """
-    Fetches and parses the textual content of a given web page URL.
-
-    Use this tool to "read" a web page. It strips away HTML tags, scripts, and other non-textual elements to provide the clean text content. It also extracts any hyperlinks found on the page. This is useful when you need to understand the content of a specific URL that you have discovered through a search or from another source.
+_DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
 
-    Args:
-        url (str): The full URL of the web page to open (e.g., "https://example.com/article").
 
-
-        str: A JSON object containing the cleaned text `content` of the page and a list of `links_on_page`.
+async def open_web_page(url: str) -> dict[str, Any]:
     """
+    Fetches, parses, and converts a web page to readable Markdown.
+    Preserves semantic structure, removes non-essentials, and extracts all absolute links.
 
-
-
-    try:
-        from playwright.async_api import async_playwright
-
-        async with async_playwright() as p:
-            browser = await p.chromium.launch(headless=True)
-            page = await browser.new_page()
-            await page.set_extra_http_headers({"User-Agent": user_agent})
-            try:
-                # Navigate to the URL with a timeout of 30 seconds
-                await page.goto(page_url, wait_until="networkidle", timeout=30000)
-                # Wait for the content to load
-                await page.wait_for_load_state("domcontentloaded")
-                # Get the page content
-                content = await page.content()
-                # Extract all links from the page
-                links = await page.eval_on_selector_all(
-                    "a[href]",
-                    """
-                    (elements) => elements.map(el => {
-                        const href = el.getAttribute('href');
-                        if (href && !href.startsWith('#') && !href.startsWith('/')) {
-                            return href;
-                        }
-                        return null;
-                    }).filter(href => href !== null)
-                    """,
-                )
-                return {"content": content, "links_on_page": links}
-            finally:
-                await browser.close()
-    except BaseException:
-        import requests
-
-        response = requests.get(url, headers={"User-Agent": user_agent})
-        if response.status_code != 200:
-            msg = f"Unable to retrieve search results. Status code: {response.status_code}"
-            raise Exception(msg)
-        return {"content": response.text, "links_on_page": []}
-
-    result = await get_page_content(url)
-    # Parse the HTML content
-    return json.dumps(parse_html_text(result["content"]))
-
-
-def create_search_internet_tool(serp_api_key: str) -> Callable[[str, int], str]:
-    """
-    Creates a tool that searches the internet using the SerpAPI Google Search API.
-
-    This factory returns a function that can be used to find information on the web. The generated tool is the primary way to answer general knowledge questions or to find information on topics you are unfamiliar with.
+    Example:
+        open_web_page(url='https://www.example.com/article')
 
     Args:
-
+        url (str): The full URL of the web page.
 
     Returns:
-
+        dict: Markdown content and a list of absolute links.
     """
+    html_content, links = await _fetch_page_content(url)
+    markdown_content = _convert_html_to_markdown(html_content)
+    return {"content": markdown_content, "links_on_page": links}
+
+
+def create_search_internet_tool() -> Callable:
+    if llm_config.default_search_internet_tool is not None:
+        return llm_config.default_search_internet_tool
 
-    def search_internet(query: str,
+    def search_internet(query: str, page: int = 1) -> dict[str, Any]:
         """
-        Performs an internet search using
+        Performs an internet search using a search engine.
+        Use to find information, answer general knowledge, or research topics.
 
-
+        Example:
+            search_internet(query='latest AI advancements', page=1)
 
         Args:
            query (str): The search query.
-
+           page (int, optional): Search result page number. Defaults to 1.
 
        Returns:
-
+           dict: Summary of search results (titles, links, snippets).
        """
        import requests
 
-       [13 removed lines truncated by the diff viewer]
+       if (
+           CFG.SEARCH_INTERNET_METHOD.strip().lower() == "serpapi"
+           and CFG.SERPAPI_KEY != ""
+       ):
+           response = requests.get(
+               "https://serpapi.com/search",
+               headers={"User-Agent": _DEFAULT_USER_AGENT},
+               params={
+                   "q": query,
+                   "start": (page - 1) * 10,
+                   "hl": CFG.SERPAPI_LANG,
+                   "safe": CFG.SERPAPI_SAFE,
+                   "api_key": CFG.SERPAPI_KEY,
+               },
+           )
+       elif (
+           CFG.SEARCH_INTERNET_METHOD.strip().lower() == "brave"
+           and CFG.BRAVE_API_KEY != ""
+       ):
+           response = requests.get(
+               "https://api.search.brave.com/res/v1/web/search",
+               headers={
+                   "User-Agent": _DEFAULT_USER_AGENT,
+                   "Accept": "application/json",
+                   "x-subscription-token": CFG.BRAVE_API_KEY,
+               },
+               params={
+                   "q": query,
+                   "count": "10",
+                   "offset": (page - 1) * 10,
+                   "safesearch": CFG.BRAVE_API_SAFE,
+                   "search_lang": CFG.BRAVE_API_LANG,
+                   "summary": "true",
+               },
+           )
+       else:
+           response = requests.get(
+               url=f"{CFG.SEARXNG_BASE_URL}/search",
+               headers={"User-Agent": _DEFAULT_USER_AGENT},
+               params={
+                   "q": query,
+                   "format": "json",
+                   "pageno": page,
+                   "safesearch": CFG.SEARXNG_SAFE,
+                   "language": CFG.SEARXNG_LANG,
+               },
+           )
        if response.status_code != 200:
            raise Exception(
                f"Error: Unable to retrieve search results (status code: {response.status_code})"  # noqa
            )
-       return json
+       return response.json()
 
    return search_internet
 
 
-def
-    """
-    [33 removed lines truncated by the diff viewer]
-    response = requests.get("http://export.arxiv.org/api/query", params=params)
-    return response.content
-
+async def _fetch_page_content(url: str) -> tuple[str, list[str]]:
+    """Fetches the HTML content and all absolute links from a URL."""
+    try:
+        from playwright.async_api import async_playwright
+
+        async with async_playwright() as p:
+            browser = await p.chromium.launch(headless=True)
+            page = await browser.new_page()
+            await page.set_extra_http_headers({"User-Agent": _DEFAULT_USER_AGENT})
+            try:
+                await page.goto(url, wait_until="networkidle", timeout=30000)
+                await page.wait_for_load_state("domcontentloaded")
+                content = await page.content()
+                links = await page.eval_on_selector_all(
+                    "a[href]",
+                    """
+                    (elements, baseUrl) => elements.map(el => {
+                        const href = el.getAttribute('href');
+                        if (!href || href.startsWith('#')) return null;
+                        try {
+                            return new URL(href, baseUrl).href;
+                        } catch (e) {
+                            return null;
+                        }
+                    }).filter(href => href !== null)
+                    """,
+                    url,
+                )
+                return content, links
+                # return json.dumps({"content": content, "links": links})
+            finally:
+                await browser.close()
+    except Exception:
+        import requests
+        from bs4 import BeautifulSoup
 
-
+        response = requests.get(url, headers={"User-Agent": _DEFAULT_USER_AGENT})
+        if response.status_code != 200:
+            raise Exception(
+                f"Unable to retrieve page content. Status code: {response.status_code}"
+            )
+        content = response.text
+        soup = BeautifulSoup(content, "html.parser")
+        links = [
+            urljoin(url, a["href"])
+            for a in soup.find_all("a", href=True)
+            if not a["href"].startswith("#")
+        ]
+        return content, links
+        # return json.dumps({"content": content, "links": links})
+
+
+def _convert_html_to_markdown(html_text: str) -> str:
+    """Converts HTML content to a clean Markdown representation."""
    from bs4 import BeautifulSoup
+    from markdownify import markdownify as md
 
-    ignored_tags = [
-        "script",
-        "link",
-        "meta",
-        "style",
-        "code",
-        "footer",
-        "nav",
-        "header",
-        "aside",
-    ]
    soup = BeautifulSoup(html_text, "html.parser")
-
-    for
-
-
-            link: str = anchor["href"]
-            if link.startswith("#") or link.startswith("/"):
-                continue
-            links.append(link)
-    for tag in soup(ignored_tags):
+    # Remove non-content tags
+    for tag in soup(
+        ["script", "link", "meta", "style", "header", "footer", "nav", "aside"]
+    ):
        tag.decompose()
-
-    return
+    # Convert the cleaned HTML to Markdown
+    return md(str(soup))
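A sketch of the reworked web tools in use, assuming zrb 1.21.17 with a configured search backend (per the code above, SearXNG is the fallback when neither SerpAPI nor Brave is configured):

import asyncio

from zrb.builtin.llm.tool.web import create_search_internet_tool, open_web_page

page = asyncio.run(open_web_page("https://example.com"))
print(page["content"][:200])      # Markdown now, not raw HTML in a JSON string
print(page["links_on_page"][:5])  # absolute URLs resolved against the page

# The factory no longer takes serp_api_key; the backend is picked at call
# time from CFG.SEARCH_INTERNET_METHOD.
search_internet = create_search_internet_tool()
results = search_internet("zrb task automation", page=1)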
zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py
CHANGED
@@ -204,7 +204,7 @@ def update_migration_metadata_file(ctx: AnyContext, migration_metadata_file_path
     app_name = os.path.basename(APP_DIR)
     existing_migration_metadata_code = read_file(migration_metadata_file_path)
     write_file(
-
+        abs_file_path=migration_metadata_file_path,
         content=[
             _get_migration_import_schema_code(
                 existing_migration_metadata_code, app_name, ctx.input.entity
@@ -251,7 +251,7 @@ def update_client_file(ctx: AnyContext, client_file_path: str):
     snake_plural_entity_name = to_snake_case(ctx.input.plural)
     pascal_entity_name = to_pascal_case(ctx.input.entity)
     write_file(
-
+        abs_file_path=client_file_path,
         content=[
             _get_import_schema_for_client_code(
                 existing_code=existing_client_code, entity_name=ctx.input.entity
@@ -305,7 +305,7 @@ def update_api_client_file(ctx: AnyContext, api_client_file_path: str):
     snake_module_name = to_snake_case(ctx.input.module)
     pascal_module_name = to_pascal_case(ctx.input.module)
     write_file(
-
+        abs_file_path=api_client_file_path,
         content=[
             f"from {app_name}.module.{snake_module_name}.service.{snake_entity_name}.{snake_entity_name}_service_factory import {snake_entity_name}_service",  # noqa
             prepend_code_to_module(
@@ -327,7 +327,7 @@ def update_direct_client_file(ctx: AnyContext, direct_client_file_path: str):
     snake_module_name = to_snake_case(ctx.input.module)
     pascal_module_name = to_pascal_case(ctx.input.module)
     write_file(
-
+        abs_file_path=direct_client_file_path,
         content=[
             f"from {app_name}.module.{snake_module_name}.service.{snake_entity_name}.{snake_entity_name}_service_factory import {snake_entity_name}_service",  # noqa
             prepend_code_to_module(
@@ -348,7 +348,7 @@ def update_route_file(ctx: AnyContext, route_file_path: str):
     app_name = os.path.basename(APP_DIR)
     module_name = to_snake_case(ctx.input.module)
     write_file(
-
+        abs_file_path=route_file_path,
         content=[
             f"from {app_name}.module.{module_name}.service.{entity_name}.{entity_name}_service_factory import {entity_name}_service",  # noqa
             append_code_to_function(
@@ -370,7 +370,7 @@ def update_gateway_subroute_file(ctx: AnyContext, module_gateway_subroute_path:
     pascal_entity_name = to_pascal_case(ctx.input.entity)
     existing_gateway_subroute_code = read_file(module_gateway_subroute_path)
     write_file(
-
+        abs_file_path=module_gateway_subroute_path,
         content=[
             _get_import_client_for_gateway_subroute_code(
                 existing_gateway_subroute_code, module_name=ctx.input.module
@@ -456,7 +456,7 @@ def update_gateway_navigation_config_file(
        },
    ).strip()
    write_file(
-
+       abs_file_path=gateway_navigation_config_file_path,
       content=[
           existing_gateway_navigation_config_code,
           new_navigation_config_code,