biblemate 0.0.23__tar.gz → 0.0.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biblemate
-Version: 0.0.23
+Version: 0.0.25
 Summary: BibleMate AI - Automate Your Bible Study
 Home-page: https://toolmate.ai
 Author: Eliran Wong
@@ -0,0 +1,3 @@
+agent_mode=True
+prompt_engineering=True
+max_steps=30
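
The user-level configuration grows from a single `agent_mode` flag to three persisted settings, stored as plain Python assignments. A minimal sketch of how such a key=value file could be applied to a config object at startup (the `Config` class, the path, and the exec-based loader here are assumptions for illustration, not the package's code):

```python
# Sketch: apply key=value overrides from a user-level config.py file.
# Config, the path, and the exec-based loader are illustrative assumptions.
import os

class Config:
    agent_mode = True
    prompt_engineering = True
    max_steps = 30

config = Config()

user_config = os.path.expanduser("~/agentmake/biblemate/config.py")  # assumed location
if os.path.isfile(user_config):
    overrides = {}
    with open(user_config) as f:
        exec(f.read(), {}, overrides)  # each line is a plain Python assignment
    for key, value in overrides.items():
        setattr(config, key, value)  # copy each persisted setting onto the config object
```
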
@@ -3,7 +3,7 @@ from biblemate.ui.prompts import getInput
 from biblemate.ui.info import get_banner
 from biblemate import config, AGENTMAKE_CONFIG
 from pathlib import Path
-import asyncio, re, os, subprocess
+import asyncio, re, os, subprocess, click
 from alive_progress import alive_bar
 from fastmcp import Client
 from agentmake import agentmake, getOpenCommand, getDictionaryOutput, edit_configurations, writeTextFile, getCurrentDateTime, AGENTMAKE_USER_DIR, USER_OS, DEVELOPER_MODE
@@ -14,89 +14,119 @@ from rich.terminal_theme import MONOKAI
 if not USER_OS == "Windows":
     import readline # for better input experience
 
-# Client to interact with the built-in Bible Study MCP server
-client = Client(os.path.join(os.path.dirname(os.path.realpath(__file__)), "bible_study_mcp.py"))
-
-# TODO: place in config.py
-MAX_STEPS = 50
+# The client that interacts with the Bible Study MCP server
+builtin_mcp_server = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bible_study_mcp.py")
+user_mcp_server = os.path.join(AGENTMAKE_USER_DIR, "biblemate", "bible_study_mcp.py") # The user path has the same basename as the built-in one; users may copy the built-in server settings to this location for customization.
+client = Client(user_mcp_server if os.path.isfile(user_mcp_server) else builtin_mcp_server)
 
 def main():
     asyncio.run(main_async())
 
+async def initialize_app(client):
+    """Initializes the application by fetching tools and prompts from the MCP server."""
+    await client.ping()
+
+    tools_raw = await client.list_tools()
+    tools = {t.name: t.description for t in tools_raw}
+    tools = dict(sorted(tools.items()))
+    tools_schema = {}
+    for t in tools_raw:
+        schema = {
+            "name": t.name,
+            "description": t.description,
+            "parameters": {
+                "type": "object",
+                "properties": t.inputSchema["properties"],
+                "required": t.inputSchema["required"],
+            },
+        }
+        tools_schema[t.name] = schema
+
+    available_tools = list(tools.keys())
+    if "get_direct_text_response" not in available_tools:
+        available_tools.insert(0, "get_direct_text_response")
+
+    tool_descriptions = ""
+    if "get_direct_text_response" not in tools:
+        tool_descriptions = """# TOOL DESCRIPTION: `get_direct_text_response`
+Get a static text-based response directly from a text-based AI model without using any other tools. This is useful when you want to provide a simple and direct answer to a question or request, without the need for online latest updates or task execution.\n\n\n"""
+    for tool_name, tool_description in tools.items():
+        tool_descriptions += f"""# TOOL DESCRIPTION: `{tool_name}`
+{tool_description}\n\n\n"""
+
+    prompts_raw = await client.list_prompts()
+    prompts = {p.name: p.description for p in prompts_raw}
+    prompts = dict(sorted(prompts.items()))
+
+    prompts_schema = {}
+    for p in prompts_raw:
+        arg_properties = {}
+        arg_required = []
+        for a in p.arguments:
+            arg_properties[a.name] = {
+                "type": "string",
+                "description": str(a.description) if a.description else "no description available",
+            }
+            if a.required:
+                arg_required.append(a.name)
+        schema = {
+            "name": p.name,
+            "description": p.description,
+            "parameters": {
+                "type": "object",
+                "properties": arg_properties,
+                "required": arg_required,
+            },
+        }
+        prompts_schema[p.name] = schema
+
+    return tools, tools_schema, available_tools, tool_descriptions, prompts, prompts_schema
+
+def backup_conversation(console, messages, master_plan):
+    """Backs up the current conversation to the user's directory."""
+    timestamp = getCurrentDateTime()
+    storagePath = os.path.join(AGENTMAKE_USER_DIR, "biblemate", timestamp)
+    Path(storagePath).mkdir(parents=True, exist_ok=True)
+    # Save full conversation
+    conversation_file = os.path.join(storagePath, "conversation.py")
+    writeTextFile(conversation_file, str(messages))
+    # Save master plan
+    writeTextFile(os.path.join(storagePath, "master_plan.md"), master_plan)
+    # Save html
+    html_file = os.path.join(storagePath, "conversation.html")
+    console.save_html(html_file, inline_styles=True, theme=MONOKAI)
+    # Save markdown
+    console.save_text(os.path.join(storagePath, "conversation.md"))
+    # Inform users of the backup location
+    print(f"Conversation backup saved to {storagePath}")
+    print(f"Report saved to {html_file}\n")
+
+def write_user_config():
+    """Writes the current configuration to the user's config file."""
+    user_config_dir = os.path.join(AGENTMAKE_USER_DIR, "biblemate")
+    Path(user_config_dir).mkdir(parents=True, exist_ok=True)
+    config_file = os.path.join(user_config_dir, "config.py")
+    configurations = f"""agent_mode={config.agent_mode}
+prompt_engineering={config.prompt_engineering}
+max_steps={config.max_steps}"""
+    writeTextFile(config_file, configurations)
+
 async def main_async():
 
+    APP_START = True
+    DEFAULT_SYSTEM = "You are BibleMate AI, an autonomous agent designed to assist users with their Bible study."
     console = Console(record=True)
     console.clear()
     console.print(get_banner())
 
     async with client:
-        await client.ping()
-
-        #resources = await client.list_resources()
-        #print("# Resources\n\n", resources, "\n\n")
-
-        # List available tools, resources, and prompts
-        tools_raw = await client.list_tools()
-        #print(tools_raw)
-        tools = {t.name: t.description for t in tools_raw}
-        tools = dict(sorted(tools.items()))
-        tools_schema = {}
-        for t in tools_raw:
-            schema = {
-                "name": t.name,
-                "description": t.description,
-                "parameters": {
-                    "type": "object",
-                    "properties": t.inputSchema["properties"],
-                    "required": t.inputSchema["required"],
-                },
-            }
-            tools_schema[t.name] = schema
-
-        available_tools = list(tools.keys())
-        if not "get_direct_text_response" in available_tools:
-            available_tools.insert(0, "get_direct_text_response")
+        tools, tools_schema, available_tools, tool_descriptions, prompts, prompts_schema = await initialize_app(client)
+
         available_tools_pattern = "|".join(available_tools)
-
-        # add tool description for get_direct_text_response if not exists
-        if not "get_direct_text_response" in tools:
-            tool_descriptions = f"""# TOOL DESCRIPTION: `get_direct_text_response`
-Get a static text-based response directly from a text-based AI model without using any other tools. This is useful when you want to provide a simple and direct answer to a question or request, without the need for online latest updates or task execution.\n\n\n"""
-        # add tool descriptions
-        for tool_name, tool_description in tools.items():
-            tool_descriptions += f"""# TOOL DESCRIPTION: `{tool_name}`
-{tool_description}\n\n\n"""
-
-        prompts_raw = await client.list_prompts()
-        #print("# Prompts\n\n", prompts_raw, "\n\n")
-        prompts = {p.name: p.description for p in prompts_raw}
-        prompts = dict(sorted(prompts.items()))
         prompt_list = [f"/{p}" for p in prompts.keys()]
         prompt_pattern = "|".join(prompt_list)
         prompt_pattern = f"""^({prompt_pattern}) """
 
-        prompts_schema = {}
-        for p in prompts_raw:
-            arg_properties = {}
-            arg_required = []
-            for a in p.arguments:
-                arg_properties[a.name] = {
-                    "type": "string",
-                    "description": str(a.description) if a.description else "no description available",
-                }
-                if a.required:
-                    arg_required.append(a.name)
-            schema = {
-                "name": p.name,
-                "description": p.description,
-                "parameters": {
-                    "type": "object",
-                    "properties": arg_properties,
-                    "required": arg_required,
-                },
-            }
-            prompts_schema[p.name] = schema
-
         user_request = ""
         master_plan = ""
         messages = []
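
The new `initialize_app` helper consolidates what was previously inline setup: it pings the server, then flattens each FastMCP tool's metadata into an OpenAI-style function schema. A self-contained sketch of that per-tool transformation, with a made-up tool standing in for an entry from `client.list_tools()`:

```python
# Sketch: the function-schema shape initialize_app builds per tool.
# `sample_tool` is a hypothetical stand-in for one MCP tool listing.
sample_tool = {
    "name": "search_bible",  # assumed tool name, for illustration only
    "description": "Search for verses matching a query.",
    "inputSchema": {
        "properties": {"query": {"type": "string", "description": "search text"}},
        "required": ["query"],
    },
}

tool_schema = {
    "name": sample_tool["name"],
    "description": sample_tool["description"],
    "parameters": {
        "type": "object",
        "properties": sample_tool["inputSchema"]["properties"],
        "required": sample_tool["inputSchema"]["required"],
    },
}
print(tool_schema)  # ready to hand to a function-calling model
```
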
@@ -140,33 +170,24 @@ Get a static text-based response directly from a text-based AI model without usi
             # Await the custom async progress bar that awaits the task.
             await async_alive_bar(task)
 
-        # backup
-        def backup():
-            nonlocal console, messages, master_plan
-            timestamp = getCurrentDateTime()
-            storagePath = os.path.join(AGENTMAKE_USER_DIR, "biblemate", timestamp)
-            Path(storagePath).mkdir(parents=True, exist_ok=True)
-            # Save full conversation
-            conversation_file = os.path.join(storagePath, "conversation.py")
-            writeTextFile(conversation_file, str(messages))
-            # Save master plan
-            writeTextFile(os.path.join(storagePath, "master_plan.md"), master_plan)
-            # Save html
-            html_file = os.path.join(storagePath, "conversation.html")
-            console.save_html(html_file, inline_styles=True, theme=MONOKAI)
-            # Save markdown
-            console.save_text(os.path.join(storagePath, "conversation.md"))
-            # Inform users of the backup location
-            print(f"Conversation backup saved to {storagePath}")
-            print(f"Report saved to {html_file}\n")
-        def write_config():
-            # TODO: support more configs
-            config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.py")
-            writeTextFile(config_file, f"agent_mode={config.agent_mode}")
-
         if messages:
             console.rule()
-
+        elif APP_START:
+            print()
+            APP_START = False
+            while True:
+                try:
+                    agentmake("Hello!", system=DEFAULT_SYSTEM)
+                    break
+                except Exception as e:
+                    print("Connection failed! Please ensure that you have a stable internet connection and that my AI backend and model are properly configured.")
+                    print("Visit https://github.com/eliranwong/agentmake#supported-backends for help with the backend configuration.\n")
+                    if click.confirm("Do you want to configure my AI backend and model now?", default=True):
+                        edit_configurations()
+                        console.rule()
+                        console.print("Restart to make the changes in the backend effective!", justify="center")
+                        console.rule()
+                    exit()
         # Original user request
         # note: `python3 -m rich.emoji` for checking emoji
         console.print("Enter your request :smiley: :" if not messages else "Enter a follow-up request :flexed_biceps: :")
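
Also new is a first-run connectivity check: a throwaway `agentmake("Hello!")` call probes the backend, and on failure `click.confirm` offers to open the configuration editor before exiting. The probe-and-offer pattern in isolation (the probe below is a stub that always fails; only `click.confirm` is the real API):

```python
# Sketch: probe the AI backend once at startup; offer configuration on failure.
# probe_backend is a hypothetical stand-in for the agentmake("Hello!") call.
import click

def probe_backend() -> None:
    raise ConnectionError("backend unreachable")  # simulate a failed probe

try:
    probe_backend()
except Exception:
    print("Connection failed! Check your AI backend configuration.")
    if click.confirm("Do you want to configure my AI backend and model now?", default=True):
        print("(open the configuration editor here)")
    raise SystemExit(0)  # a restart is required for backend changes to take effect
```
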
@@ -177,14 +198,30 @@ Get a static text-based response directly from a text-based AI model without usi
             ".chat": "enable chat mode",
             ".agent": "enable agent mode",
             ".tools": "list available tools",
-            #".resources": "list available resources",
-            ".prompts": "list available prompts",
+            ".plans": "list available plans",
+            #".resources": "list available resources", # TODO explore relevant usage for this project
+            ".promptengineering": "toggle auto prompt engineering",
+            ".steps": "configure the maximum number of steps allowed",
             ".backup": "backup conversation",
             ".open": "open a file or directory",
+            ".help": "help page",
         }
         input_suggestions = list(action_list.keys())+[f"@{t} " for t in available_tools]+prompt_list
         user_request = await getInput("> ", input_suggestions)
         while not user_request.strip():
+            # Generate ideas for `prompts to try`
+            ideas = ""
+            async def generate_ideas():
+                nonlocal ideas
+                if not messages:
+                    ideas = agentmake("Generate three `prompts to try` for bible study. Each one should be one sentence long.", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
+                else:
+                    ideas = agentmake(messages, follow_up_prompt="Generate three follow-up questions according to the on-going conversation.", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
+            await thinking(generate_ideas, "Generating ideas ...")
+            console.rule()
+            console.print(Markdown(f"## Ideas\n\n{ideas}\n\n"))
+            console.rule()
+            # Get input again
             user_request = await getInput("> ", input_suggestions)
 
         # system command
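
Empty input no longer just re-prompts: the loop first asks the model for `prompts to try` (or follow-up questions mid-conversation) and shows them as ideas. The control flow reduced to a skeleton, with stubs in place of agentmake and the prompt UI:

```python
# Sketch: re-prompt with generated ideas whenever input is empty.
# suggest() and get_input() are illustrative stubs, not package functions.
def suggest() -> str:
    return "- Compare Psalm 23 across translations\n- Outline Romans 8\n- Word study: agape"

def get_input() -> str:
    return input("> ")

user_request = get_input()
while not user_request.strip():
    print("## Ideas\n" + suggest())  # give the user something to react to
    user_request = get_input()
```
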
@@ -195,39 +232,58 @@ Get a static text-based response directly from a text-based AI model without usi
             subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
             continue
 
-        # TODO: ui - radio list menu
+        # predefined operations with `.` commands
         if user_request in action_list:
             if user_request == ".backup":
-                backup()
+                backup_conversation(console, messages, master_plan)
+            elif user_request == ".help":
+                console.rule()
+                console.print(Markdown("Visit https://github.com/eliranwong/biblemate for help."))
+                console.rule()
             elif user_request == ".tools":
                 console.rule()
                 tools_descriptions = [f"- `{name}`: {description}" for name, description in tools.items()]
                 console.print(Markdown("## Available Tools\n\n"+"\n".join(tools_descriptions)))
                 console.rule()
-            elif user_request == ".prompts":
+            elif user_request == ".plans":
                 console.rule()
                 prompts_descriptions = [f"- `{name}`: {description}" for name, description in prompts.items()]
-                console.print(Markdown("## Available Prompts\n\n"+"\n".join(prompts_descriptions)))
+                console.print(Markdown("## Available Plans\n\n"+"\n".join(prompts_descriptions)))
                 console.rule()
             elif user_request == ".backend":
                 edit_configurations()
                 console.rule()
                 console.print("Restart to make the changes in the backend effective!", justify="center")
                 console.rule()
+            elif user_request == ".steps":
+                console.rule()
+                console.print("Enter the maximum number of steps allowed below:")
+                max_steps = await getInput("> ", number_validator=True)
+                if max_steps:
+                    config.max_steps = int(max_steps)
+                    write_user_config()
+                    console.print("Maximum number of steps set to", config.max_steps, "steps.", justify="center")
+                console.rule()
+            elif user_request == ".promptengineering":
+                config.prompt_engineering = not config.prompt_engineering
+                write_user_config()
+                console.rule()
+                console.print("Prompt Engineering Enabled" if config.prompt_engineering else "Prompt Engineering Disabled", justify="center")
+                console.rule()
             elif user_request == ".chat":
                 config.agent_mode = False
-                write_config()
+                write_user_config()
                 console.rule()
                 console.print("Chat Mode Enabled", justify="center")
                 console.rule()
             elif user_request == ".agent":
                 config.agent_mode = True
-                write_config()
+                write_user_config()
                 console.rule()
                 console.print("Agent Mode Enabled", justify="center")
                 console.rule()
             elif user_request in (".new", ".quit"):
-                backup() # backup
+                backup_conversation(console, messages, master_plan) # backup
                 # reset
                 if user_request == ".new":
                     user_request = ""
@@ -259,7 +315,7 @@ Get a static text-based response directly from a text-based AI model without usi
         console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
 
         # Prompt Engineering
-        if not specified_tool == "@@":
+        if not specified_tool == "@@" and config.prompt_engineering:
             async def run_prompt_engineering():
                 nonlocal user_request
                 user_request = agentmake(messages if messages else user_request, follow_up_prompt=user_request if messages else None, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
@@ -267,7 +323,7 @@ Get a static text-based response directly from a text-based AI model without usi
 
         if not messages:
             messages = [
-                {"role": "system", "content": "You are BibleMate, an autonomous AI agent."},
+                {"role": "system", "content": DEFAULT_SYSTEM},
                 {"role": "user", "content": user_request},
             ]
         else:
@@ -411,13 +467,14 @@ Available tools are: {available_tools}.
             messages.append({"role": "user", "content": next_step})
 
             await process_tool(next_tool, next_step, step_number=step)
-            console.print(Markdown(f"\n## Output [{step}]\n\n{messages[-1]["content"]}"))
+            console.print(Markdown(f"\n## Output [{step}]\n\n{messages[-1]['content']}"))
 
             # iteration count
             step += 1
-            if step > MAX_STEPS:
-                print("Stopped! Too many steps! `MAX_STEPS` is currently set to ", MAX_STEPS, "!")
-                print("You can increase it in the settings, but be careful not to create an infinite loop!")
+            if step > config.max_steps:
+                console.rule()
+                console.print("Stopped! Too many steps! The maximum number of steps is currently set to", config.max_steps, "steps. Enter `.steps` to configure.")
+                console.rule()
                 break
 
         # Get the next suggestion
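
The hard-coded `MAX_STEPS = 50` is gone; the loop now honors the persisted `config.max_steps` and points users at the `.steps` command instead of a source edit. A minimal sketch of the same guard, with a stub standing in for one plan-execute round:

```python
# Sketch: a step budget guarding an agent loop against runaway iteration.
max_steps = 30  # mirrors config.max_steps

def run_one_step(step: int) -> bool:
    """Stub for one plan-execute round; returns True when the task is done."""
    return step >= 3

step = 1
while True:
    if run_one_step(step):
        break
    step += 1
    if step > max_steps:
        print(f"Stopped! Too many steps! The maximum is set to {max_steps}.")
        break
```
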
@@ -433,7 +490,7 @@ Available tools are: {available_tools}.
             messages.append({"role": "assistant", "content": next_suggestion})
 
         # Backup
-        backup()
+        backup_conversation(console, messages, master_plan)
 
 if __name__ == "__main__":
     asyncio.run(main())
@@ -2,4 +2,5 @@ agentmake>=1.0.70
 agentmakemcp>=0.0.8
 fastmcp[cli]
 rich
-alive-progress
+alive-progress
+click
@@ -1,9 +1,26 @@
 from agentmake.main import AGENTMAKE_USER_DIR
 from agentmake.utils.system import getCliOutput
-import os, shutil
+from prompt_toolkit.validation import Validator, ValidationError
+from biblemate import config
+import os, shutil, re
 
 
-async def getInput(prompt:str="Instruction: ", input_suggestions:list=None):
+class NumberValidator(Validator):
+    def validate(self, document):
+        text = document.text
+
+        if text and not re.search("^[0-9]+?$", text):
+            i = 0
+
+            # Get index of first non numeric character.
+            # We want to move the cursor here.
+            for i, c in enumerate(text):
+                if not c.isdigit():
+                    break
+
+            raise ValidationError(message='This entry accepts numbers only!', cursor_position=i)
+
+async def getInput(prompt:str="Instruction: ", input_suggestions:list=None, number_validator:bool=False):
     """
     Prompt for user input
     """
@@ -92,6 +109,8 @@ async def getInput(prompt:str="Instruction: ", input_suggestions:list=None):
         bottom_toolbar="[ENTER] submit [TAB] linebreak [Ctrl+N] new [Ctrl+Q] quit",
         completer=completer,
         key_bindings=bindings,
+        validator=NumberValidator() if number_validator else None,
+        default=str(config.max_steps) if number_validator else "",
     )
     print()
     return instruction.strip() if instruction else ""
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biblemate
-Version: 0.0.23
+Version: 0.0.25
 Summary: BibleMate AI - Automate Your Bible Study
 Home-page: https://toolmate.ai
 Author: Eliran Wong
@@ -1,6 +1,7 @@
 agentmake>=1.0.70
 agentmakemcp>=0.0.8
 alive-progress
+click
 fastmcp[cli]
 rich
 
@@ -27,7 +27,7 @@ with open(os.path.join(package, "requirements.txt"), "r") as fileObj:
 # https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/
 setup(
     name=package,
-    version="0.0.23",
+    version="0.0.25",
     python_requires=">=3.8, <3.13",
     description=f"BibleMate AI - Automate Your Bible Study",
     long_description=long_description,
@@ -1 +0,0 @@
-agent_mode=True