zrb 1.2.1__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff compares the contents of two publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
Files changed (41)
  1. zrb/builtin/llm/llm_chat.py +68 -9
  2. zrb/builtin/llm/tool/api.py +4 -2
  3. zrb/builtin/llm/tool/file.py +39 -0
  4. zrb/builtin/llm/tool/rag.py +37 -22
  5. zrb/builtin/llm/tool/web.py +46 -20
  6. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/column/add_column_util.py +28 -6
  7. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/template/app_template/module/gateway/view/content/my-module/my-entity.html +206 -178
  8. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/template/app_template/schema/my_entity.py +3 -1
  9. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/repository/role_db_repository.py +18 -1
  10. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/repository/role_repository.py +4 -0
  11. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/role_service.py +20 -11
  12. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/repository/user_db_repository.py +17 -2
  13. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/repository/user_repository.py +4 -0
  14. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/user_service.py +19 -11
  15. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/gateway/view/content/auth/permission.html +209 -180
  16. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/gateway/view/content/auth/role.html +362 -0
  17. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/gateway/view/content/auth/user.html +377 -0
  18. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/gateway/view/static/common/util.js +68 -13
  19. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/gateway/view/static/crud/util.js +50 -29
  20. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/permission.py +3 -1
  21. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/role.py +6 -5
  22. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/user.py +9 -3
  23. zrb/config.py +3 -1
  24. zrb/content_transformer/content_transformer.py +7 -1
  25. zrb/context/context.py +8 -2
  26. zrb/input/any_input.py +5 -0
  27. zrb/input/base_input.py +6 -0
  28. zrb/input/bool_input.py +2 -0
  29. zrb/input/float_input.py +2 -0
  30. zrb/input/int_input.py +2 -0
  31. zrb/input/option_input.py +2 -0
  32. zrb/input/password_input.py +2 -0
  33. zrb/input/text_input.py +11 -5
  34. zrb/runner/cli.py +1 -1
  35. zrb/runner/common_util.py +3 -3
  36. zrb/runner/web_route/task_input_api_route.py +1 -1
  37. zrb/task/llm_task.py +103 -16
  38. {zrb-1.2.1.dist-info → zrb-1.3.0.dist-info}/METADATA +85 -18
  39. {zrb-1.2.1.dist-info → zrb-1.3.0.dist-info}/RECORD +41 -40
  40. {zrb-1.2.1.dist-info → zrb-1.3.0.dist-info}/WHEEL +0 -0
  41. {zrb-1.2.1.dist-info → zrb-1.3.0.dist-info}/entry_points.txt +0 -0

--- a/zrb/builtin/llm/llm_chat.py
+++ b/zrb/builtin/llm/llm_chat.py
@@ -2,22 +2,40 @@ import json
 import os
 from typing import Any
 
+from pydantic_ai.models import Model
+
 from zrb.builtin.group import llm_group
 from zrb.builtin.llm.tool.api import get_current_location, get_current_weather
 from zrb.builtin.llm.tool.cli import run_shell_command
-from zrb.builtin.llm.tool.web import open_web_route, query_internet
+from zrb.builtin.llm.tool.file import (
+    list_file,
+    read_source_code,
+    read_text_file,
+    write_text_file,
+)
+from zrb.builtin.llm.tool.web import (
+    create_search_internet_tool,
+    open_web_page,
+    search_arxiv,
+    search_wikipedia,
+)
 from zrb.config import (
     LLM_ALLOW_ACCESS_INTERNET,
+    LLM_ALLOW_ACCESS_LOCAL_FILE,
     LLM_ALLOW_ACCESS_SHELL,
     LLM_HISTORY_DIR,
     LLM_MODEL,
     LLM_SYSTEM_PROMPT,
+    SERP_API_KEY,
 )
+from zrb.context.any_context import AnyContext
 from zrb.context.any_shared_context import AnySharedContext
+from zrb.input.any_input import AnyInput
 from zrb.input.bool_input import BoolInput
 from zrb.input.str_input import StrInput
 from zrb.input.text_input import TextInput
 from zrb.task.llm_task import LLMTask
+from zrb.util.attr import get_attr
 from zrb.util.file import read_file, write_file
 from zrb.util.string.conversion import to_pascal_case
 
@@ -72,23 +90,53 @@ def _write_chat_conversation(
     write_file(last_session_file_path, current_session_name)
 
 
-llm_chat: LLMTask = llm_group.add_task(
-    LLMTask(
-        name="llm-chat",
-        input=[
+class _LLMChat(LLMTask):
+
+    _default_model: Model | str | None = None
+
+    def set_default_model(self, model: Model | str):
+        self._default_model = model
+
+    @property
+    def inputs(self) -> list[AnyInput]:
+        task_inputs = super().inputs
+        model_input_default = LLM_MODEL if self._default_model is None else "default"
+        return [
             StrInput(
                 "model",
                 description="LLM Model",
                 prompt="LLM Model",
-                default=LLM_MODEL,
+                default=model_input_default,
                 allow_positional_parsing=False,
+                always_prompt=False,
             ),
+            *task_inputs,
+        ]
+
+    def _get_model(self, ctx: AnyContext) -> str | Model | None:
+        if ctx.input.model == "default":
+            if self._default_model is not None:
+                return self._default_model
+            return super()._get_model(ctx)
+        model = get_attr(
+            ctx, ctx.input.model, "ollama_chat/llama3.1", auto_render=self._render_model
+        )
+        if isinstance(model, (Model, str)) or model is None:
+            return model
+        raise ValueError("Invalid model")
+
+
+llm_chat: LLMTask = llm_group.add_task(
+    _LLMChat(
+        name="llm-chat",
+        input=[
             TextInput(
                 "system-prompt",
                 description="System prompt",
                 prompt="System prompt",
                 default=LLM_SYSTEM_PROMPT,
                 allow_positional_parsing=False,
+                always_prompt=False,
             ),
             BoolInput(
                 "start-new",
@@ -96,6 +144,7 @@ llm_chat: LLMTask = llm_group.add_task(
                 prompt="Start new conversation (LLM will forget everything)",
                 default=False,
                 allow_positional_parsing=False,
+                always_prompt=False,
             ),
             TextInput("message", description="User message", prompt="Your message"),
             PreviousSessionInput(
@@ -104,12 +153,12 @@ llm_chat: LLMTask = llm_group.add_task(
                 prompt="Previous conversation session (can be empty)",
                 allow_positional_parsing=False,
                 allow_empty=True,
+                always_prompt=False,
             ),
         ],
         conversation_history_reader=_read_chat_conversation,
         conversation_history_writer=_write_chat_conversation,
         description="Chat with LLM",
-        model="{ctx.input.model}",
         system_prompt="{ctx.input['system-prompt']}",
         message="{ctx.input.message}",
         retries=0,
@@ -117,11 +166,21 @@ llm_chat: LLMTask = llm_group.add_task(
     alias="chat",
 )
 
+
+if LLM_ALLOW_ACCESS_LOCAL_FILE:
+    llm_chat.add_tool(read_source_code)
+    llm_chat.add_tool(list_file)
+    llm_chat.add_tool(read_text_file)
+    llm_chat.add_tool(write_text_file)
+
 if LLM_ALLOW_ACCESS_SHELL:
     llm_chat.add_tool(run_shell_command)
 
 if LLM_ALLOW_ACCESS_INTERNET:
-    llm_chat.add_tool(open_web_route)
-    llm_chat.add_tool(query_internet)
+    llm_chat.add_tool(open_web_page)
+    llm_chat.add_tool(search_wikipedia)
+    llm_chat.add_tool(search_arxiv)
+    if SERP_API_KEY != "":
+        llm_chat.add_tool(create_search_internet_tool(SERP_API_KEY))
 llm_chat.add_tool(get_current_location)
 llm_chat.add_tool(get_current_weather)
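
The new _LLMChat subclass is what makes the model input optional at the prompt: inputs advertises "default" whenever a default model has been registered, and _get_model resolves that sentinel back to the registered model. A minimal sketch of how downstream code might use the new set_default_model hook (the model name below is an illustrative assumption; per the annotation above, any pydantic_ai.models.Model instance or model-name string should be accepted):

# Sketch: registering a default model for the built-in llm-chat task.
# "gpt-4o-mini" is a placeholder; pass any Model instance or model string.
from zrb.builtin.llm.llm_chat import llm_chat

llm_chat.set_default_model("gpt-4o-mini")
# Users who keep the "model" input at "default" now get this model;
# an explicit --model value still wins, since _get_model only checks the sentinel.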

--- a/zrb/builtin/llm/tool/api.py
+++ b/zrb/builtin/llm/tool/api.py
@@ -1,13 +1,13 @@
 import json
 from typing import Annotated, Literal
 
-import requests
-
 
 def get_current_location() -> (
     Annotated[str, "JSON string representing latitude and longitude"]
 ):  # noqa
     """Get the user's current location."""
+    import requests
+
     return json.dumps(requests.get("http://ip-api.com/json?fields=lat,lon").json())
 
 
@@ -17,6 +17,8 @@ def get_current_weather(
     temperature_unit: Literal["celsius", "fahrenheit"],
 ) -> str:
     """Get the current weather in a given location."""
+    import requests
+
     resp = requests.get(
         "https://api.open-meteo.com/v1/forecast",
         params={
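
The only change here is that import requests moves from module scope into the two tool bodies, so importing zrb.builtin.llm.tool.api no longer requires requests to be installed; the dependency is only touched when a tool is actually called. The same deferred-import pattern, shown on a hypothetical helper that is not part of zrb:

# Sketch of the deferred-import pattern used above (hypothetical helper):
# the optional dependency is imported at call time, so merely importing
# the module never fails when requests is missing.
def fetch_example() -> str:
    import requests

    return requests.get("https://example.com").text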

--- /dev/null
+++ b/zrb/builtin/llm/tool/file.py
@@ -0,0 +1,39 @@
+import os
+
+from zrb.util.file import read_file, write_file
+
+
+def list_file(
+    directory: str = ".",
+    extensions: list[str] = [".py", ".go", ".js", ".ts", ".java", ".c", ".cpp"],
+) -> list[str]:
+    """List all files in a directory"""
+    all_files: list[str] = []
+    for root, _, files in os.walk(directory):
+        for filename in files:
+            for extension in extensions:
+                if filename.lower().endswith(extension):
+                    all_files.append(os.path.join(root, filename))
+    return all_files
+
+
+def read_text_file(file: str) -> str:
+    """Read a text file"""
+    return read_file(os.path.abspath(file))
+
+
+def write_text_file(file: str, content: str):
+    """Write a text file"""
+    return write_file(os.path.abspath(file), content)
+
+
+def read_source_code(
+    directory: str = ".",
+    extensions: list[str] = [".py", ".go", ".js", ".ts", ".java", ".c", ".cpp"],
+) -> list[str]:
+    """Read source code in a directory"""
+    files = list_file(directory, extensions)
+    for index, file in enumerate(files):
+        content = read_text_file(file)
+        files[index] = f"# {file}\n```\n{content}\n```"
+    return files
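
These file tools are plain functions, so besides being auto-registered on llm-chat when LLM_ALLOW_ACCESS_LOCAL_FILE is set, they can be attached to any other LLMTask. A rough sketch (the task name, message, and wiring below are illustrative assumptions, not part of the package):

# Sketch: attaching the new file tools to a custom LLM task.
from zrb.builtin.llm.tool.file import list_file, read_text_file
from zrb.task.llm_task import LLMTask

summarize_code = LLMTask(
    name="summarize-code",  # hypothetical task name
    message="List the Python files in this project and summarize one of them.",
)
summarize_code.add_tool(list_file)
summarize_code.add_tool(read_text_file)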

--- a/zrb/builtin/llm/tool/rag.py
+++ b/zrb/builtin/llm/tool/rag.py
@@ -1,7 +1,9 @@
+import fnmatch
 import hashlib
 import json
 import os
 import sys
+from collections.abc import Callable
 
 import ulid
 
@@ -15,6 +17,20 @@ from zrb.util.cli.style import stylize_error, stylize_faint
 from zrb.util.file import read_file
 
 
+class RAGFileReader:
+    def __init__(self, glob_pattern: str, read: Callable[[str], str]):
+        self.glob_pattern = glob_pattern
+        self.read = read
+
+    def is_match(self, file_name: str):
+        if os.sep not in self.glob_pattern and (
+            os.altsep is None or os.altsep not in self.glob_pattern
+        ):
+            # Pattern like "*.txt" – match only the basename.
+            return fnmatch.fnmatch(os.path.basename(file_name), self.glob_pattern)
+        return fnmatch.fnmatch(file_name, self.glob_pattern)
+
+
 def create_rag_from_directory(
     tool_name: str,
     tool_description: str,
@@ -25,6 +41,7 @@ def create_rag_from_directory(
     chunk_size: int = RAG_CHUNK_SIZE,
     overlap: int = RAG_OVERLAP,
     max_result_count: int = RAG_MAX_RESULT_COUNT,
+    file_reader: list[RAGFileReader] = [],
 ):
     async def retrieve(query: str) -> str:
         from chromadb import PersistentClient
@@ -36,35 +53,31 @@ def create_rag_from_directory(
             path=vector_db_path, settings=Settings(allow_reset=True)
         )
         collection = client.get_or_create_collection(vector_db_collection)
-
         # Track file changes using a hash-based approach
         hash_file_path = os.path.join(vector_db_path, "file_hashes.json")
         previous_hashes = _load_hashes(hash_file_path)
         current_hashes = {}
-
+        # Get updated_files
        updated_files = []
-
         for root, _, files in os.walk(document_dir_path):
             for file in files:
                 file_path = os.path.join(root, file)
                 file_hash = _compute_file_hash(file_path)
                 relative_path = os.path.relpath(file_path, document_dir_path)
                 current_hashes[relative_path] = file_hash
-
                 if previous_hashes.get(relative_path) != file_hash:
                     updated_files.append(file_path)
-
+        # Upsert updated_files to vector db
         if updated_files:
             print(
                 stylize_faint(f"Updating {len(updated_files)} changed files"),
                 file=sys.stderr,
             )
-
             for file_path in updated_files:
                 try:
                     relative_path = os.path.relpath(file_path, document_dir_path)
                     collection.delete(where={"file_path": relative_path})
-                    content = _read_file_content(file_path)
+                    content = _read_txt_content(file_path, file_reader)
                     file_id = ulid.new().str
                     for i in range(0, len(content), chunk_size - overlap):
                         chunk = content[i : i + chunk_size]
@@ -92,14 +105,13 @@ def create_rag_from_directory(
                         stylize_error(f"Error processing {file_path}: {e}"),
                         file=sys.stderr,
                     )
-
            _save_hashes(hash_file_path, current_hashes)
         else:
             print(
                 stylize_faint("No changes detected. Skipping database update."),
                 file=sys.stderr,
             )
-
+        # Vectorize query and get related document chunks
         print(stylize_faint("Vectorizing query"), file=sys.stderr)
         embedding_result = list(embedding_model.embed([query]))
         query_vector = embedding_result[0]
@@ -123,7 +135,22 @@ def _compute_file_hash(file_path: str) -> str:
     return hash_md5.hexdigest()
 
 
-def _read_file_content(file_path: str) -> str:
+def _load_hashes(file_path: str) -> dict:
+    if os.path.exists(file_path):
+        with open(file_path, "r") as f:
+            return json.load(f)
+    return {}
+
+
+def _save_hashes(file_path: str, hashes: dict):
+    with open(file_path, "w") as f:
+        json.dump(hashes, f)
+
+
+def _read_txt_content(file_path: str, file_reader: list[RAGFileReader]):
+    for reader in file_reader:
+        if reader.is_match(file_path):
+            return reader.read(file_path)
     if file_path.lower().endswith(".pdf"):
         return _read_pdf(file_path)
     return read_file(file_path)
@@ -136,15 +163,3 @@ def _read_pdf(file_path: str) -> str:
         return "\n".join(
             page.extract_text() for page in pdf.pages if page.extract_text()
         )
-
-
-def _load_hashes(file_path: str) -> dict:
-    if os.path.exists(file_path):
-        with open(file_path, "r") as f:
-            return json.load(f)
-    return {}
-
-
-def _save_hashes(file_path: str, hashes: dict):
-    with open(file_path, "w") as f:
-        json.dump(hashes, f)
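
RAGFileReader lets callers override how particular files are turned into text before chunking: _read_txt_content tries each reader's glob pattern first and only falls back to the built-in PDF/plain-text handling when nothing matches. A rough sketch of wiring a custom reader into create_rag_from_directory (the CSV reader, tool name, and directory are illustrative assumptions; the document_dir_path keyword mirrors the variable used inside retrieve() above, and other arguments keep their defaults):

# Sketch: a custom reader so .csv files are indexed as plain text.
import csv

from zrb.builtin.llm.tool.rag import RAGFileReader, create_rag_from_directory


def read_csv(file_path: str) -> str:
    with open(file_path, newline="") as f:
        return "\n".join(", ".join(row) for row in csv.reader(f))


retrieve_notes = create_rag_from_directory(
    tool_name="retrieve_notes",
    tool_description="Search the local notes directory.",
    document_dir_path="./notes",
    file_reader=[RAGFileReader(glob_pattern="*.csv", read=read_csv)],
)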

--- a/zrb/builtin/llm/tool/web.py
+++ b/zrb/builtin/llm/tool/web.py
@@ -1,8 +1,9 @@
 import json
+from collections.abc import Callable
 from typing import Annotated
 
 
-def open_web_route(url: str) -> str:
+def open_web_page(url: str) -> str:
     """Get content from a web page."""
     import requests
 
@@ -19,30 +20,55 @@ def open_web_route(url: str) -> str:
     return json.dumps(parse_html_text(response.text))
 
 
-def query_internet(
+def create_search_internet_tool(serp_api_key: str) -> Callable[[str, int], str]:
+    def search_internet(
+        query: Annotated[str, "Search query"],
+        num_results: Annotated[int, "Search result count, by default 10"] = 10,
+    ) -> str:
+        """Search factual information from the internet by using Google."""
+        import requests
+
+        response = requests.get(
+            "https://serpapi.com/search",
+            headers={
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
+            },
+            params={
+                "q": query,
+                "num": num_results,
+                "hl": "en",
+                "safe": "off",
+                "api_key": serp_api_key,
+            },
+        )
+        if response.status_code != 200:
+            raise Exception(
+                f"Error: Unable to retrieve search results (status code: {response.status_code})"  # noqa
+            )
+        return json.dumps(parse_html_text(response.text))
+
+    return search_internet
+
+
+def search_wikipedia(query: Annotated[str, "Search query"]) -> str:
+    """Search on wikipedia"""
+    import requests
+
+    params = {"action": "query", "list": "search", "srsearch": query, "format": "json"}
+    response = requests.get("https://en.wikipedia.org/w/api.php", params=params)
+    return response.json()
+
+
+def search_arxiv(
     query: Annotated[str, "Search query"],
     num_results: Annotated[int, "Search result count, by default 10"] = 10,
 ) -> str:
-    """Search factual information from the internet by using Google."""
+    """Search on Arxiv"""
     import requests
 
-    response = requests.get(
-        "https://google.com/search",
-        headers={
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"  # noqa
-        },
-        params={
-            "q": query,
-            "num": num_results,
-            "hl": "en",
-            "safe": "off",
-        },
-    )
-    if response.status_code != 200:
-        raise Exception(
-            f"Error: Unable to retrieve search results (status code: {response.status_code})"  # noqa
-        )
-    return json.dumps(parse_html_text(response.text))
+    params = {"search_query": f"all:{query}", "start": 0, "max_results": num_results}
+    response = requests.get("http://export.arxiv.org/api/query", params=params)
+    return response.content
 
 
 def parse_html_text(html_text: str) -> dict[str, str]:
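
query_internet, which scraped Google directly, is replaced by a factory: create_search_internet_tool closes over a SerpApi key and returns a search_internet function that can be registered like any other tool, which is how llm_chat.py wires it up when SERP_API_KEY is non-empty. A minimal sketch (the key below is a placeholder):

# Sketch: building the SerpApi-backed search tool by hand.
# The key is a placeholder; llm_chat.py passes SERP_API_KEY instead.
from zrb.builtin.llm.tool.web import create_search_internet_tool

search_internet = create_search_internet_tool(serp_api_key="YOUR_SERPAPI_KEY")
print(search_internet("zrb task runner", num_results=5))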

--- a/zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/column/add_column_util.py
+++ b/zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/column/add_column_util.py
@@ -2,7 +2,7 @@ import os
 import re
 import textwrap
 
-from bs4 import BeautifulSoup, formatter
+from bs4 import BeautifulSoup, Tag, formatter
 from my_app_name._zrb.config import APP_DIR
 
 from zrb.context.any_context import AnyContext
@@ -65,6 +65,7 @@ def update_my_app_name_ui(ctx: AnyContext):
     kebab_entity_name = to_kebab_case(ctx.input.entity)
     snake_column_name = to_snake_case(ctx.input.column)
     human_column_name = to_human_case(ctx.input.column).title()
+    column_type = ctx.input.type
     subroute_file_path = os.path.join(
         APP_DIR,
         "module",
@@ -85,18 +86,21 @@ def update_my_app_name_ui(ctx: AnyContext):
         form_id="crud-create-form",
         column_label=human_column_name,
         column_name=snake_column_name,
+        column_type=column_type,
     )
     new_code = _add_input_to_form(
         new_code,
         form_id="crud-update-form",
         column_label=human_column_name,
         column_name=snake_column_name,
+        column_type=column_type,
     )
     new_code = _add_input_to_form(
         new_code,
         form_id="crud-delete-form",
         column_label=human_column_name,
         column_name=snake_column_name,
+        column_type=column_type,
     )
     # JS Function
     new_code = _alter_js_function_returned_array(
@@ -140,7 +144,7 @@ def _add_th_before_last(html_str, table_id, th_content):
 
 
 def _add_input_to_form(
-    html_str: str, form_id: str, column_label: str, column_name: str
+    html_str: str, form_id: str, column_label: str, column_name: str, column_type: str
 ) -> str:
     soup = BeautifulSoup(html_str, "html.parser")
     # Find the form by id.
@@ -151,9 +155,7 @@ def _add_input_to_form(
     new_label = soup.new_tag("label")
     new_label.append(f"{column_label}: ")
     # Create a new input element with the provided column name.
-    new_input = soup.new_tag(
-        "input", attrs={"type": "text", "name": column_name, "required": "required"}
-    )
+    new_input = _get_html_input(soup, column_name, column_type)
     new_label.append(new_input)
     # Look for a footer element inside the form.
     footer = form.find("footer")
@@ -168,6 +170,26 @@ def _add_input_to_form(
     )
 
 
+def _get_html_input(soup: BeautifulSoup, column_name: str, column_type: str) -> Tag:
+    # Map your custom types to HTML input types.
+    type_mapping = {
+        "str": "text",
+        "int": "number",
+        "float": "number",
+        "bool": "checkbox",
+        "datetime": "datetime-local",
+        "date": "date",
+    }
+    # Get the corresponding HTML input type; default to "text" if not found.
+    html_input_type = type_mapping.get(column_type, "text")
+    # Create the new input tag with the appropriate attributes.
+    new_input = soup.new_tag(
+        "input",
+        attrs={"type": html_input_type, "name": column_name, "required": "required"},
+    )
+    return new_input
+
+
 def _infer_html_indent_width(html_str: str) -> int:
     """
     Infer the indentation width (number of spaces) from the HTML string.
@@ -208,7 +230,7 @@ def _alter_js_function_returned_array(
     # 3. Captures the newline and leading whitespace (indent) of the return statement.
     # 4. Captures the rest of the return line.
     pattern = (
-        r"(function\s+"
+        r"("
        + re.escape(js_function_name)
         + r"\s*\([^)]*\)\s*\{)"  # group1: function header
         r"([\s\S]*?)"  # group2: code before return