biblemate 0.0.22__tar.gz → 0.0.23__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biblemate
-Version: 0.0.22
+Version: 0.0.23
 Summary: BibleMate AI - Automate Your Bible Study
 Home-page: https://toolmate.ai
 Author: Eliran Wong
@@ -1,5 +1,6 @@
 import logging
 from fastmcp import FastMCP
+from fastmcp.prompts.prompt import PromptMessage, TextContent
 from agentmake import agentmake
 from biblemate import AGENTMAKE_CONFIG
 
@@ -347,4 +348,22 @@ def write_bible_sermon(request:str) -> str:
     messages = agentmake(request, **{'instruction': 'bible/sermon', 'system': 'auto'}, **AGENTMAKE_CONFIG)
     return getResponse(messages)
 
+@mcp.prompt
+def simple_bible_study(request:str) -> PromptMessage:
+    """Perform a simple bible study task; bible reference(s) must be given"""
+    global PromptMessage, TextContent
+    prompt_text = f"""You are a bible study agent. You check the user request, under the `User Request` section, and resolve it with the following steps in order:
+1. Call tool 'retrieve_english_bible_verses' for Bible text,
+2. Call tool 'retrieve_bible_cross_references' for Bible cross-references,
+3. Call tool 'study_old_testament_themes' to study Old Testament themes or 'study_new_testament_themes' to study New Testament themes, and
+4. Call tool 'write_bible_theology' to explain its theology.
+
+# User Request
+
+---
+{request}
+---
+"""
+    return PromptMessage(role="user", content=TextContent(type="text", text=prompt_text))
+
 mcp.run(show_banner=False)
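The hunk above registers `simple_bible_study` as an MCP prompt that wraps the user's request in a fixed study plan. Below is a minimal sketch (not part of the package) of how such a prompt could be exercised with FastMCP's in-memory client; the import path `biblemate.bible_study_mcp` is hypothetical, and passing the server object to `Client` is assumed from FastMCP 2.x, while `list_prompts`/`get_prompt` mirror the calls the CLI hunks further down make against a configured server.

```python
import asyncio
from fastmcp import Client

from biblemate.bible_study_mcp import mcp  # hypothetical import path for the server defined above

async def demo():
    # In-memory transport: pass the FastMCP server object directly (assumed FastMCP 2.x behaviour)
    async with Client(mcp) as client:
        prompts = await client.list_prompts()
        print([p.name for p in prompts])  # expected to include "simple_bible_study"
        result = await client.get_prompt("simple_bible_study", {"request": "John 3:16"})
        # The rendered prompt text travels as the first message's content, which is how the CLI reads it
        print(result.messages[0].content.text)

asyncio.run(demo())
```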
@@ -70,6 +70,7 @@ Get a static text-based response directly from a text-based AI model without usi
     prompts_raw = await client.list_prompts()
     #print("# Prompts\n\n", prompts_raw, "\n\n")
     prompts = {p.name: p.description for p in prompts_raw}
+    prompts = dict(sorted(prompts.items()))
     prompt_list = [f"/{p}" for p in prompts.keys()]
     prompt_pattern = "|".join(prompt_list)
     prompt_pattern = f"""^({prompt_pattern}) """
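For context, the newly sorted `prompts` dict feeds the `/name` dispatch regex built in the surrounding context lines, so prompt listings and completions come out in a stable, alphabetical order. A small illustration, using the `simple_bible_study` prompt from this release plus a made-up second name:

```python
import re

# Illustration only; "another_prompt" is a made-up name standing in for other server prompts.
prompts = {"simple_bible_study": "Perform a simple bible study task", "another_prompt": "..."}
prompts = dict(sorted(prompts.items()))              # 0.0.23: stable, alphabetical ordering
prompt_list = [f"/{p}" for p in prompts.keys()]      # ["/another_prompt", "/simple_bible_study"]
prompt_pattern = "|".join(prompt_list)
prompt_pattern = f"""^({prompt_pattern}) """         # "^(/another_prompt|/simple_bible_study) "

print(re.search(prompt_pattern, "/simple_bible_study John 3:16").group(1))  # "/simple_bible_study"
```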
@@ -177,7 +178,7 @@ Get a static text-based response directly from a text-based AI model without usi
     ".agent": "enable agent mode",
     ".tools": "list available tools",
     #".resources": "list available resources",
-    #".prompts": "list available prompts",
+    ".prompts": "list available prompts",
     ".backup": "backup conversation",
     ".open": "open a file or directory",
 }
@@ -203,6 +204,11 @@ Get a static text-based response directly from a text-based AI model without usi
         tools_descriptions = [f"- `{name}`: {description}" for name, description in tools.items()]
         console.print(Markdown("## Available Tools\n\n"+"\n".join(tools_descriptions)))
         console.rule()
+    elif user_request == ".prompts":
+        console.rule()
+        prompts_descriptions = [f"- `{name}`: {description}" for name, description in prompts.items()]
+        console.print(Markdown("## Available Prompts\n\n"+"\n".join(prompts_descriptions)))
+        console.rule()
     elif user_request == ".backend":
         edit_configurations()
         console.rule()
@@ -231,8 +237,12 @@ Get a static text-based response directly from a text-based AI model without usi
         continue
 
     # Check if a single tool is specified
+    specified_prompt = ""
     specified_tool = ""
-    if re.search(f"""^@({available_tools_pattern}) """, user_request):
+    if re.search(prompt_pattern, user_request):
+        specified_prompt = re.search(prompt_pattern, user_request).group(1)
+        user_request = user_request[len(specified_prompt):]
+    elif re.search(f"""^@({available_tools_pattern}) """, user_request):
         specified_tool = re.search(f"""^@({available_tools_pattern}) """, user_request).group(1)
         user_request = user_request[len(specified_tool)+2:]
     elif user_request.startswith("@@"):
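The net effect of this hunk is that a leading `/prompt_name ` now takes priority over a leading `@tool_name `, and the matched prefix is stripped before the request is handled further down. A self-contained sketch of that dispatch order, with illustrative names standing in for `prompt_pattern` and `available_tools_pattern`:

```python
import re

# Illustrative patterns; the real ones are built from the server's prompt and tool listings.
prompt_pattern = r"^(/simple_bible_study) "
available_tools_pattern = "retrieve_english_bible_verses|write_bible_sermon"

def dispatch(user_request: str):
    specified_prompt, specified_tool = "", ""
    if re.search(prompt_pattern, user_request):
        specified_prompt = re.search(prompt_pattern, user_request).group(1)
        user_request = user_request[len(specified_prompt):]    # keeps the space after the prompt name
    elif re.search(f"^@({available_tools_pattern}) ", user_request):
        specified_tool = re.search(f"^@({available_tools_pattern}) ", user_request).group(1)
        user_request = user_request[len(specified_tool) + 2:]  # drops "@", the tool name and the space
    return specified_prompt, specified_tool, user_request

print(dispatch("/simple_bible_study John 3:16"))
# ('/simple_bible_study', '', ' John 3:16')
print(dispatch("@write_bible_sermon love of God"))
# ('', 'write_bible_sermon', 'love of God')
```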
@@ -303,22 +313,19 @@ Get a static text-based response directly from a text-based AI model without usi
 
     # generate master plan
     if not master_plan:
-        if re.search(prompt_pattern, user_request):
-            prompt_name = re.search(prompt_pattern, user_request).group(1)
-            user_request = user_request[len(prompt_name):]
+        if specified_prompt:
             # Call the MCP prompt
-            prompt_schema = prompts_schema[prompt_name[1:]]
+            prompt_schema = prompts_schema[specified_prompt[1:]]
             prompt_properties = prompt_schema["parameters"]["properties"]
             if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
-                result = await client.get_prompt(prompt_name[1:], {"request": user_request})
+                result = await client.get_prompt(specified_prompt[1:], {"request": user_request})
             else:
                 structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
-                result = await client.get_prompt(prompt_name[1:], structured_output)
+                result = await client.get_prompt(specified_prompt[1:], structured_output)
             #print(result, "\n\n")
             master_plan = result.messages[0].content.text
             # display info
             console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
-            console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
         else:
             # display info
             console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biblemate
-Version: 0.0.22
+Version: 0.0.23
 Summary: BibleMate AI - Automate Your Bible Study
 Home-page: https://toolmate.ai
 Author: Eliran Wong
@@ -27,7 +27,7 @@ with open(os.path.join(package, "requirements.txt"), "r") as fileObj:
 # https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/
 setup(
     name=package,
-    version="0.0.22",
+    version="0.0.23",
     python_requires=">=3.8, <3.13",
     description=f"BibleMate AI - Automate Your Bible Study",
     long_description=long_description,
File without changes