biblemate 0.0.22__py3-none-any.whl → 0.0.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- biblemate/bible_study_mcp.py +19 -0
- biblemate/main.py +16 -9
- {biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/METADATA +1 -1
- {biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/RECORD +7 -7
- {biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/WHEEL +0 -0
- {biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/entry_points.txt +0 -0
- {biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/top_level.txt +0 -0
biblemate/bible_study_mcp.py
CHANGED
@@ -1,5 +1,6 @@
 import logging
 from fastmcp import FastMCP
+from fastmcp.prompts.prompt import PromptMessage, TextContent
 from agentmake import agentmake
 from biblemate import AGENTMAKE_CONFIG
 
@@ -347,4 +348,22 @@ def write_bible_sermon(request:str) -> str:
     messages = agentmake(request, **{'instruction': 'bible/sermon', 'system': 'auto'}, **AGENTMAKE_CONFIG)
     return getResponse(messages)
 
+@mcp.prompt
+def simple_bible_study(request:str) -> PromptMessage:
+    """Perform a simple bible study task; bible reference(s) must be given"""
+    global PromptMessage, TextContent
+    prompt_text = f"""You are a bible study agent. You check the user request, under the `User Request` section, and resolve it with the following steps in order:
+1. Call tool 'retrieve_english_bible_verses' for Bible text,
+2. Call tool 'retrieve_bible_cross_references' for Bible cross-references,
+3. Call tool 'study_old_testament_themes' for study old testament themes or 'study_new_testament_themes' for study old testament themes, and
+4. Call tool 'write_bible_theology' to explain its theology.
+
+# User Request
+
+---
+{request}
+---
+"""
+    return PromptMessage(role="user", content=TextContent(type="text", text=prompt_text))
+
 mcp.run(show_banner=False)
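The added `simple_bible_study` function is registered as a FastMCP prompt: when a client requests it, the server returns a single user-role `PromptMessage` whose text wraps the caller's request in a fixed, step-by-step study template. Below is a self-contained sketch of the same pattern, assuming only that `fastmcp` is installed; the `demo` server name and the sample `John 3:16` request are illustrative, not taken from the package.

```python
# Minimal sketch of the prompt pattern used in this release (not biblemate's actual server).
import asyncio
from fastmcp import FastMCP, Client
from fastmcp.prompts.prompt import PromptMessage, TextContent

mcp = FastMCP("demo")  # assumed server name for illustration

@mcp.prompt
def simple_bible_study(request: str) -> PromptMessage:
    """Wrap the user's request in a fixed study template."""
    text = f"Resolve the request below with the study tools in order.\n\n---\n{request}\n---"
    return PromptMessage(role="user", content=TextContent(type="text", text=text))

async def main():
    # In-memory client transport for testing; a real client would connect to the running server.
    async with Client(mcp) as client:
        result = await client.get_prompt("simple_bible_study", {"request": "John 3:16"})
        print(result.messages[0].content.text)

asyncio.run(main())
```

In the package itself, main.py reads the returned message text as the agent's master plan (`result.messages[0].content.text`), as shown in the next file's diff.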
biblemate/main.py
CHANGED
@@ -70,6 +70,7 @@ Get a static text-based response directly from a text-based AI model without usi
     prompts_raw = await client.list_prompts()
     #print("# Prompts\n\n", prompts_raw, "\n\n")
     prompts = {p.name: p.description for p in prompts_raw}
+    prompts = dict(sorted(prompts.items()))
     prompt_list = [f"/{p}" for p in prompts.keys()]
     prompt_pattern = "|".join(prompt_list)
     prompt_pattern = f"""^({prompt_pattern}) """
@@ -177,7 +178,7 @@ Get a static text-based response directly from a text-based AI model without usi
         ".agent": "enable agent mode",
         ".tools": "list available tools",
         #".resources": "list available resources",
-
+        ".prompts": "list available prompts",
         ".backup": "backup conversation",
         ".open": "open a file or directory",
     }
@@ -203,6 +204,11 @@ Get a static text-based response directly from a text-based AI model without usi
             tools_descriptions = [f"- `{name}`: {description}" for name, description in tools.items()]
             console.print(Markdown("## Available Tools\n\n"+"\n".join(tools_descriptions)))
             console.rule()
+        elif user_request == ".prompts":
+            console.rule()
+            prompts_descriptions = [f"- `{name}`: {description}" for name, description in prompts.items()]
+            console.print(Markdown("## Available Prompts\n\n"+"\n".join(prompts_descriptions)))
+            console.rule()
         elif user_request == ".backend":
             edit_configurations()
             console.rule()
@@ -231,8 +237,12 @@ Get a static text-based response directly from a text-based AI model without usi
             continue
 
         # Check if a single tool is specified
+        specified_prompt = ""
         specified_tool = ""
-        if re.search(f"""^@({available_tools_pattern}) """, user_request):
+        if re.search(prompt_pattern, user_request):
+            specified_prompt = re.search(prompt_pattern, user_request).group(1)
+            user_request = user_request[len(specified_prompt):]
+        elif re.search(f"""^@({available_tools_pattern}) """, user_request):
             specified_tool = re.search(f"""^@({available_tools_pattern}) """, user_request).group(1)
             user_request = user_request[len(specified_tool)+2:]
         elif user_request.startswith("@@"):
@@ -303,22 +313,19 @@ Get a static text-based response directly from a text-based AI model without usi
 
         # generate master plan
         if not master_plan:
-            if re.search(prompt_pattern, user_request):
-                prompt_name = re.search(prompt_pattern, user_request).group(1)
-                user_request = user_request[len(prompt_name):]
+            if specified_prompt:
                 # Call the MCP prompt
-                prompt_schema = prompts_schema[prompt_name[1:]]
+                prompt_schema = prompts_schema[specified_prompt[1:]]
                 prompt_properties = prompt_schema["parameters"]["properties"]
                 if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
-                    result = await client.get_prompt(prompt_name[1:], {"request": user_request})
+                    result = await client.get_prompt(specified_prompt[1:], {"request": user_request})
                 else:
                     structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
-                    result = await client.get_prompt(prompt_name[1:], structured_output)
+                    result = await client.get_prompt(specified_prompt[1:], structured_output)
                 #print(result, "\n\n")
                 master_plan = result.messages[0].content.text
                 # display info# display info
                 console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
-                console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
             else:
                 # display info
                 console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
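Read together, the main.py changes sort the prompt registry, expose it through a new `.prompts` command, and route a leading `/prompt_name ` prefix to `client.get_prompt` instead of the tool pipeline. The standalone sketch below only illustrates that prefix parsing; the `another_prompt` entry and the sample request are made-up values, not part of the package.

```python
# Hypothetical illustration of the prompt-prefix routing added above.
# "another_prompt" is a placeholder name; "simple_bible_study" comes from this release.
import re

prompts = {
    "another_prompt": "placeholder description",
    "simple_bible_study": "Perform a simple bible study task; bible reference(s) must be given",
}
prompts = dict(sorted(prompts.items()))               # sorted so .prompts lists them alphabetically
prompt_list = [f"/{p}" for p in prompts.keys()]       # ["/another_prompt", "/simple_bible_study"]
prompt_pattern = "^(" + "|".join(prompt_list) + ") "  # matches a leading "/<prompt name> "

user_request = "/simple_bible_study John 3:16"
specified_prompt = ""
if re.search(prompt_pattern, user_request):
    specified_prompt = re.search(prompt_pattern, user_request).group(1)  # "/simple_bible_study"
    user_request = user_request[len(specified_prompt):]                  # " John 3:16"
# main.py then calls client.get_prompt(specified_prompt[1:], {"request": user_request})
print(repr(specified_prompt), repr(user_request))
```

Note that, as in the diff, the slice keeps the space that follows the prompt name, whereas the `@tool` branch skips it with `len(specified_tool)+2`.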
{biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/RECORD
CHANGED
@@ -1,15 +1,15 @@
 biblemate/README.md,sha256=0xKiXqwB-WBxHCKyUByD-2Yre0zgflTDun-wYc9o-J0,6173
 biblemate/__init__.py,sha256=aFO4_EperOrwwDBdrkTKfDMt2Fh18Y0A2G_nUC_cmmM,78
-biblemate/bible_study_mcp.py,sha256
+biblemate/bible_study_mcp.py,sha256=-g66m7zB-6bQ-rilmUs5-PPAtXdm_qES2mH8nZ7bGG4,17453
 biblemate/config.py,sha256=ktpLv_5qdbf3FErxUIdCvVc9MO6kQH4Zt_omoJ7msIs,15
-biblemate/main.py,sha256=
+biblemate/main.py,sha256=O7PKxdJbmjeIU4NomvkHEuaUuMSmjsttWAIot4_IFvU,22614
 biblemate/package_name.txt,sha256=WkkuEEkgw7EKpXV8GshpzhZlwRor1wpotTS7vP24b_g,9
 biblemate/requirements.txt,sha256=MliJX2PmogiVmgqHk4W0TMqRp2FLYXcGIkf8PS2RV94,70
 biblemate/core/systems.py,sha256=nG_NgcLSRhdaHuxuCPN5ZfJUhP88kdfwhRCvRk4RLjI,1874
 biblemate/ui/info.py,sha256=QRCno0CYUHVoOtVkZIxVamZONmtI7KRmOT2YoUagY5s,811
 biblemate/ui/prompts.py,sha256=mxdC5BU7NMok9MOm1E39MHSrxB9gSRqGY7HsOc--rRg,3484
-biblemate-0.0.
-biblemate-0.0.
-biblemate-0.0.
-biblemate-0.0.
-biblemate-0.0.
+biblemate-0.0.23.dist-info/METADATA,sha256=P7UDN6G3rrS7uxAxqTqNIfCP5H-bZaorbjbPuxSRfRI,7668
+biblemate-0.0.23.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+biblemate-0.0.23.dist-info/entry_points.txt,sha256=tbEfTFr6LhPR1E_zP3CsPwJsmG-G4MCnJ3FcQEMiqo0,50
+biblemate-0.0.23.dist-info/top_level.txt,sha256=pq9uX0tAS0bizZcZ5GW5zIoDLQBa-b5QDlDGsdHNgiU,10
+biblemate-0.0.23.dist-info/RECORD,,
{biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/WHEEL
File without changes
{biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/entry_points.txt
File without changes
{biblemate-0.0.22.dist-info → biblemate-0.0.23.dist-info}/top_level.txt
File without changes