zrb 1.13.1__py3-none-any.whl → 1.21.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/__init__.py +2 -6
- zrb/attr/type.py +10 -7
- zrb/builtin/__init__.py +2 -0
- zrb/builtin/git.py +12 -1
- zrb/builtin/group.py +31 -15
- zrb/builtin/http.py +7 -8
- zrb/builtin/llm/attachment.py +40 -0
- zrb/builtin/llm/chat_completion.py +287 -0
- zrb/builtin/llm/chat_session.py +130 -144
- zrb/builtin/llm/chat_session_cmd.py +288 -0
- zrb/builtin/llm/chat_trigger.py +78 -0
- zrb/builtin/llm/history.py +4 -4
- zrb/builtin/llm/llm_ask.py +218 -110
- zrb/builtin/llm/tool/api.py +74 -62
- zrb/builtin/llm/tool/cli.py +56 -21
- zrb/builtin/llm/tool/code.py +57 -47
- zrb/builtin/llm/tool/file.py +292 -255
- zrb/builtin/llm/tool/note.py +84 -0
- zrb/builtin/llm/tool/rag.py +25 -18
- zrb/builtin/llm/tool/search/__init__.py +1 -0
- zrb/builtin/llm/tool/search/brave.py +66 -0
- zrb/builtin/llm/tool/search/searxng.py +61 -0
- zrb/builtin/llm/tool/search/serpapi.py +61 -0
- zrb/builtin/llm/tool/sub_agent.py +53 -26
- zrb/builtin/llm/tool/web.py +94 -157
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
- zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
- zrb/builtin/searxng/config/settings.yml +5671 -0
- zrb/builtin/searxng/start.py +21 -0
- zrb/builtin/setup/latex/ubuntu.py +1 -0
- zrb/builtin/setup/ubuntu.py +1 -1
- zrb/builtin/shell/autocomplete/bash.py +4 -3
- zrb/builtin/shell/autocomplete/zsh.py +4 -3
- zrb/config/config.py +297 -79
- zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
- zrb/config/default_prompt/interactive_system_prompt.md +25 -28
- zrb/config/default_prompt/persona.md +1 -1
- zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
- zrb/config/default_prompt/summarization_prompt.md +57 -16
- zrb/config/default_prompt/system_prompt.md +29 -25
- zrb/config/llm_config.py +129 -24
- zrb/config/llm_context/config.py +127 -90
- zrb/config/llm_context/config_parser.py +1 -7
- zrb/config/llm_context/workflow.py +81 -0
- zrb/config/llm_rate_limitter.py +100 -47
- zrb/context/any_shared_context.py +7 -1
- zrb/context/context.py +8 -2
- zrb/context/shared_context.py +6 -8
- zrb/group/any_group.py +12 -5
- zrb/group/group.py +67 -3
- zrb/input/any_input.py +5 -1
- zrb/input/base_input.py +18 -6
- zrb/input/option_input.py +13 -1
- zrb/input/text_input.py +7 -24
- zrb/runner/cli.py +21 -20
- zrb/runner/common_util.py +24 -19
- zrb/runner/web_route/task_input_api_route.py +5 -5
- zrb/runner/web_route/task_session_api_route.py +1 -4
- zrb/runner/web_util/user.py +7 -3
- zrb/session/any_session.py +12 -6
- zrb/session/session.py +39 -18
- zrb/task/any_task.py +24 -3
- zrb/task/base/context.py +17 -9
- zrb/task/base/execution.py +15 -8
- zrb/task/base/lifecycle.py +8 -4
- zrb/task/base/monitoring.py +12 -7
- zrb/task/base_task.py +69 -5
- zrb/task/base_trigger.py +12 -5
- zrb/task/llm/agent.py +130 -145
- zrb/task/llm/agent_runner.py +152 -0
- zrb/task/llm/config.py +45 -13
- zrb/task/llm/conversation_history.py +110 -29
- zrb/task/llm/conversation_history_model.py +4 -179
- zrb/task/llm/default_workflow/coding/workflow.md +41 -0
- zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
- zrb/task/llm/default_workflow/git/workflow.md +118 -0
- zrb/task/llm/default_workflow/golang/workflow.md +128 -0
- zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
- zrb/task/llm/default_workflow/java/workflow.md +146 -0
- zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
- zrb/task/llm/default_workflow/python/workflow.md +160 -0
- zrb/task/llm/default_workflow/researching/workflow.md +153 -0
- zrb/task/llm/default_workflow/rust/workflow.md +162 -0
- zrb/task/llm/default_workflow/shell/workflow.md +299 -0
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_processor.py +206 -0
- zrb/task/llm/history_summarization.py +2 -192
- zrb/task/llm/print_node.py +192 -64
- zrb/task/llm/prompt.py +198 -153
- zrb/task/llm/subagent_conversation_history.py +41 -0
- zrb/task/llm/tool_confirmation_completer.py +41 -0
- zrb/task/llm/tool_wrapper.py +216 -55
- zrb/task/llm/workflow.py +76 -0
- zrb/task/llm_task.py +122 -70
- zrb/task/make_task.py +2 -3
- zrb/task/rsync_task.py +25 -10
- zrb/task/scheduler.py +4 -4
- zrb/util/attr.py +54 -39
- zrb/util/cli/markdown.py +12 -0
- zrb/util/cli/text.py +30 -0
- zrb/util/file.py +27 -11
- zrb/util/git.py +2 -2
- zrb/util/{llm/prompt.py → markdown.py} +2 -3
- zrb/util/string/conversion.py +1 -1
- zrb/util/truncate.py +23 -0
- zrb/util/yaml.py +204 -0
- zrb/xcom/xcom.py +10 -0
- {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/METADATA +40 -20
- {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/RECORD +114 -83
- {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/WHEEL +1 -1
- zrb/task/llm/default_workflow/coding.md +0 -24
- zrb/task/llm/default_workflow/copywriting.md +0 -17
- zrb/task/llm/default_workflow/researching.md +0 -18
- {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/web.py
CHANGED
```diff
@@ -1,179 +1,116 @@
-import json
 from collections.abc import Callable
+from typing import Any
+from urllib.parse import urljoin
 
+from zrb.builtin.llm.tool.search import brave, searxng, serpapi
+from zrb.config.config import CFG
+from zrb.config.llm_config import llm_config
 
-
-    """
-    Fetches and parses the textual content of a given web page URL.
-
-    Use this tool to "read" a web page. It strips away HTML tags, scripts, and other non-textual elements to provide the clean text content. It also extracts any hyperlinks found on the page. This is useful when you need to understand the content of a specific URL that you have discovered through a search or from another source.
+_DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
 
-    Args:
-        url (str): The full URL of the web page to open (e.g., "https://example.com/article").
 
-
-        str: A JSON object containing the cleaned text `content` of the page and a list of `links_on_page`.
+async def open_web_page(url: str) -> dict[str, Any]:
     """
+    Fetches, parses, and converts a web page to readable Markdown.
+    Preserves semantic structure, removes non-essentials, and extracts all absolute links.
 
-
-
-
-
-
-
-            async with async_playwright() as p:
-                browser = await p.chromium.launch(headless=True)
-                page = await browser.new_page()
-                await page.set_extra_http_headers({"User-Agent": user_agent})
-                try:
-                    # Navigate to the URL with a timeout of 30 seconds
-                    await page.goto(page_url, wait_until="networkidle", timeout=30000)
-                    # Wait for the content to load
-                    await page.wait_for_load_state("domcontentloaded")
-                    # Get the page content
-                    content = await page.content()
-                    # Extract all links from the page
-                    links = await page.eval_on_selector_all(
-                        "a[href]",
-                        """
-                        (elements) => elements.map(el => {
-                            const href = el.getAttribute('href');
-                            if (href && !href.startsWith('#') && !href.startsWith('/')) {
-                                return href;
-                            }
-                            return null;
-                        }).filter(href => href !== null)
-                        """,
-                    )
-                    return {"content": content, "links_on_page": links}
-                finally:
-                    await browser.close()
-        except BaseException:
-            import requests
-
-            response = requests.get(url, headers={"User-Agent": user_agent})
-            if response.status_code != 200:
-                msg = f"Unable to retrieve search results. Status code: {response.status_code}"
-                raise Exception(msg)
-            return {"content": response.text, "links_on_page": []}
-
-    result = await get_page_content(url)
-    # Parse the HTML content
-    return json.dumps(parse_html_text(result["content"]))
-
-
-def create_search_internet_tool(serp_api_key: str) -> Callable[[str, int], str]:
-    """
-    Creates a tool that searches the internet using the SerpAPI Google Search API.
+    **EFFICIENCY TIP:**
+    Use this tool to read the full content of a specific search result or article.
+    It returns clean Markdown and a list of links, which is perfect for deep-diving
+    into a topic without navigating a browser UI.
 
-
+    Example:
+        open_web_page(url='https://www.example.com/article')
 
     Args:
-
+        url (str): The full URL of the web page.
 
     Returns:
-
+        dict: Markdown content and a list of absolute links.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    html_content, links = await _fetch_page_content(url)
+    markdown_content = _convert_html_to_markdown(html_content)
+    return {"content": markdown_content, "links_on_page": links}
+
+
+def create_search_internet_tool() -> Callable:
+    if llm_config.default_search_internet_tool is not None:
+        return llm_config.default_search_internet_tool
+    if (
+        CFG.SEARCH_INTERNET_METHOD.strip().lower() == "serpapi"
+        and CFG.SERPAPI_KEY != ""
+    ):
+        return serpapi.search_internet
+    if (
+        CFG.SEARCH_INTERNET_METHOD.strip().lower() == "brave"
+        and CFG.BRAVE_API_KEY != ""
+    ):
+        return brave.search_internet
+    return searxng.search_internet
+
+
+async def _fetch_page_content(url: str) -> tuple[str, list[str]]:
+    """Fetches the HTML content and all absolute links from a URL."""
+    try:
+        from playwright.async_api import async_playwright
+
+        async with async_playwright() as p:
+            browser = await p.chromium.launch(headless=True)
+            page = await browser.new_page()
+            await page.set_extra_http_headers({"User-Agent": _DEFAULT_USER_AGENT})
+            try:
+                await page.goto(url, wait_until="networkidle", timeout=30000)
+                await page.wait_for_load_state("domcontentloaded")
+                content = await page.content()
+                links = await page.eval_on_selector_all(
+                    "a[href]",
+                    """
+                    (elements, baseUrl) => elements.map(el => {
+                        const href = el.getAttribute('href');
+                        if (!href || href.startsWith('#')) return null;
+                        try {
+                            return new URL(href, baseUrl).href;
+                        } catch (e) {
+                            return null;
+                        }
+                    }).filter(href => href !== null)
+                    """,
+                    url,
+                )
+                return content, links
+                # return json.dumps({"content": content, "links": links})
+            finally:
+                await browser.close()
+    except Exception:
         import requests
+        from bs4 import BeautifulSoup
 
-        response = requests.get(
-            "https://serpapi.com/search",
-            headers={
-                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
-            },
-            params={
-                "q": query,
-                "num": num_results,
-                "hl": "en",
-                "safe": "off",
-                "api_key": serp_api_key,
-            },
-        )
+        response = requests.get(url, headers={"User-Agent": _DEFAULT_USER_AGENT})
         if response.status_code != 200:
             raise Exception(
-                f"
+                f"Unable to retrieve page content. Status code: {response.status_code}"
            )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    Returns:
-        str: The raw JSON response from the Wikipedia API, containing a list of search results.
-    """
-    import requests
-
-    params = {"action": "query", "list": "search", "srsearch": query, "format": "json"}
-    response = requests.get("https://en.wikipedia.org/w/api.php", params=params)
-    return response.json()
-
-
-def search_arxiv(query: str, num_results: int = 10) -> str:
-    """
-    Searches for academic papers and preprints on ArXiv.
-
-    Use this tool when the user's query is scientific or technical in nature and they are likely looking for research papers, articles, or academic publications.
-
-    Args:
-        query (str): The search query, which can include keywords, author names, or titles.
-        num_results (int, optional): The maximum number of results to return. Defaults to 10.
-
-    Returns:
-        str: The raw XML response from the ArXiv API, containing a list of matching papers.
-    """
-    import requests
-
-    params = {"search_query": f"all:{query}", "start": 0, "max_results": num_results}
-    response = requests.get("http://export.arxiv.org/api/query", params=params)
-    return response.content
-
-
-def parse_html_text(html_text: str) -> dict[str, str]:
+        content = response.text
+        soup = BeautifulSoup(content, "html.parser")
+        links = [
+            urljoin(url, a["href"])
+            for a in soup.find_all("a", href=True)
+            if not a["href"].startswith("#")
+        ]
+        return content, links
+        # return json.dumps({"content": content, "links": links})
+
+
+def _convert_html_to_markdown(html_text: str) -> str:
+    """Converts HTML content to a clean Markdown representation."""
     from bs4 import BeautifulSoup
+    from markdownify import markdownify as md
 
-    ignored_tags = [
-        "script",
-        "link",
-        "meta",
-        "style",
-        "code",
-        "footer",
-        "nav",
-        "header",
-        "aside",
-    ]
     soup = BeautifulSoup(html_text, "html.parser")
-
-    for
-
-
-            link: str = anchor["href"]
-            if link.startswith("#") or link.startswith("/"):
-                continue
-            links.append(link)
-    for tag in soup(ignored_tags):
+    # Remove non-content tags
+    for tag in soup(
+        ["script", "link", "meta", "style", "header", "footer", "nav", "aside"]
+    ):
         tag.decompose()
-
-    return
+    # Convert the cleaned HTML to Markdown
+    return md(str(soup))
```
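For orientation, here is a minimal, illustrative sketch (not part of the diff) of how the reworked web tools in 1.21.33 could be exercised, assuming the package and its optional fetching dependencies (playwright, or requests plus beautifulsoup4 and markdownify) are installed:

```python
# Illustrative sketch only, not shipped in the package.
# Assumes zrb 1.21.33 is installed; module path and function names
# are taken from the zrb/builtin/llm/tool/web.py diff above.
import asyncio

from zrb.builtin.llm.tool.web import create_search_internet_tool, open_web_page


async def main() -> None:
    # open_web_page now returns a dict instead of a JSON string:
    # {"content": <markdown>, "links_on_page": [<absolute urls>]}
    page = await open_web_page("https://example.com")
    print(page["content"][:200])
    print(len(page["links_on_page"]), "links found")

    # create_search_internet_tool no longer takes a SerpAPI key; the backend
    # (serpapi, brave, or searxng) is resolved from CFG / llm_config at runtime.
    search_internet = create_search_internet_tool()
    print(search_internet)


asyncio.run(main())
```

The behavioral changes visible in the diff: `open_web_page` returns Markdown content plus absolute links rather than a JSON string of stripped HTML, and internet search is delegated to pluggable backends (SerpAPI, Brave, or SearXNG) selected through configuration instead of a hard-coded SerpAPI call.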
zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py
CHANGED

```diff
@@ -204,7 +204,7 @@ def update_migration_metadata_file(ctx: AnyContext, migration_metadata_file_path
     app_name = os.path.basename(APP_DIR)
     existing_migration_metadata_code = read_file(migration_metadata_file_path)
     write_file(
-
+        abs_file_path=migration_metadata_file_path,
         content=[
             _get_migration_import_schema_code(
                 existing_migration_metadata_code, app_name, ctx.input.entity
@@ -251,7 +251,7 @@ def update_client_file(ctx: AnyContext, client_file_path: str):
     snake_plural_entity_name = to_snake_case(ctx.input.plural)
     pascal_entity_name = to_pascal_case(ctx.input.entity)
     write_file(
-
+        abs_file_path=client_file_path,
         content=[
             _get_import_schema_for_client_code(
                 existing_code=existing_client_code, entity_name=ctx.input.entity
@@ -305,7 +305,7 @@ def update_api_client_file(ctx: AnyContext, api_client_file_path: str):
     snake_module_name = to_snake_case(ctx.input.module)
     pascal_module_name = to_pascal_case(ctx.input.module)
     write_file(
-
+        abs_file_path=api_client_file_path,
         content=[
             f"from {app_name}.module.{snake_module_name}.service.{snake_entity_name}.{snake_entity_name}_service_factory import {snake_entity_name}_service",  # noqa
             prepend_code_to_module(
@@ -327,7 +327,7 @@ def update_direct_client_file(ctx: AnyContext, direct_client_file_path: str):
     snake_module_name = to_snake_case(ctx.input.module)
     pascal_module_name = to_pascal_case(ctx.input.module)
     write_file(
-
+        abs_file_path=direct_client_file_path,
         content=[
             f"from {app_name}.module.{snake_module_name}.service.{snake_entity_name}.{snake_entity_name}_service_factory import {snake_entity_name}_service",  # noqa
             prepend_code_to_module(
@@ -348,7 +348,7 @@ def update_route_file(ctx: AnyContext, route_file_path: str):
     app_name = os.path.basename(APP_DIR)
     module_name = to_snake_case(ctx.input.module)
     write_file(
-
+        abs_file_path=route_file_path,
         content=[
             f"from {app_name}.module.{module_name}.service.{entity_name}.{entity_name}_service_factory import {entity_name}_service",  # noqa
             append_code_to_function(
@@ -370,7 +370,7 @@ def update_gateway_subroute_file(ctx: AnyContext, module_gateway_subroute_path:
     pascal_entity_name = to_pascal_case(ctx.input.entity)
     existing_gateway_subroute_code = read_file(module_gateway_subroute_path)
     write_file(
-
+        abs_file_path=module_gateway_subroute_path,
         content=[
             _get_import_client_for_gateway_subroute_code(
                 existing_gateway_subroute_code, module_name=ctx.input.module
@@ -456,7 +456,7 @@ def update_gateway_navigation_config_file(
         },
     ).strip()
     write_file(
-
+        abs_file_path=gateway_navigation_config_file_path,
         content=[
             existing_gateway_navigation_config_code,
             new_navigation_config_code,
```
zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py
CHANGED

```diff
@@ -73,7 +73,7 @@ def update_app_zrb_config_file(ctx: AnyContext, zrb_config_file_path: str):
         ]
     )
     write_file(
-
+        abs_file_path=zrb_config_file_path,
         content=append_key_to_dict(
             original_code=existing_zrb_config_code,
             dictionary_name="MICROSERVICES_ENV_VARS",
@@ -86,7 +86,7 @@ def update_app_zrb_config_file(ctx: AnyContext, zrb_config_file_path: str):
 def update_app_zrb_task_file(ctx: AnyContext, zrb_task_file_path: str):
     existing_zrb_task_code = read_file(zrb_task_file_path)
     write_file(
-
+        abs_file_path=zrb_task_file_path,
         content=[
             existing_zrb_task_code.strip(),
             "",
@@ -126,7 +126,7 @@ def _get_task_definition_code(existing_code: str, module_name: str) -> str | Non
 def update_app_main_file(ctx: AnyContext, app_main_file_path: str):
     existing_app_main_code = read_file(app_main_file_path)
     write_file(
-
+        abs_file_path=app_main_file_path,
         content=[
             _get_import_module_route_code(existing_app_main_code, ctx.input.module),
             existing_app_main_code,
@@ -187,7 +187,7 @@ def update_gateway_route_file(ctx: AnyContext, gateway_route_file_path: str):
     existing_gateway_route_code = read_file(gateway_route_file_path)
     snake_module_name = to_snake_case(ctx.input.module)
     write_file(
-
+        abs_file_path=gateway_route_file_path,
         content=[
             _get_module_subroute_import(existing_gateway_route_code, ctx.input.module),
             append_code_to_function(
@@ -233,7 +233,7 @@ def update_gateway_navigation_config_file(
         },
     ).strip()
     write_file(
-
+        abs_file_path=gateway_navigation_config_file_path,
         content=[
             existing_gateway_navigation_config_code,
             new_navigation_config_code,
```
zrb/builtin/project/add/fastapp/fastapp_util.py
CHANGED

```diff
@@ -22,7 +22,7 @@ def is_project_zrb_init_file(ctx: AnyContext, file_path: str) -> bool:
 def update_project_zrb_init_file(ctx: AnyContext, zrb_init_path: str):
     existing_zrb_init_code = read_file(zrb_init_path)
     write_file(
-
+        abs_file_path=zrb_init_path,
         content=[
             _get_import_load_file_code(existing_zrb_init_code),
             existing_zrb_init_code.strip(),
```
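The fastapp scaffolding hunks above all make the same one-line change: the target path is now passed to `write_file` via the `abs_file_path` keyword. A hedged usage sketch of the new calling convention, assuming `read_file`/`write_file` are the helpers exported by `zrb.util.file` (a module also touched in this release):

```python
# Illustrative sketch only, not shipped in the package.
# The abs_file_path keyword and list-valued content argument are taken
# from the hunks above; the zrb.util.file import path is an assumption.
import os

from zrb.util.file import read_file, write_file

config_path = os.path.abspath("zrb_init.py")  # hypothetical target file
existing_code = read_file(config_path)
write_file(
    abs_file_path=config_path,  # keyword used in 1.21.33
    content=[existing_code.strip(), "", "# appended by scaffolding"],
)
```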