biblemate 0.0.18__py3-none-any.whl → 0.0.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
biblemate/README.md CHANGED
@@ -61,6 +61,10 @@ How to swap?
61
61
  * Enter `.chat` in BibleMate AI prompt to enable chat mode and disable agent mode.
62
62
  * Enter `.agent` in BibleMate AI prompt to enable agent mode and disable chat mode.
63
63
 
64
+ ## Specify Tool(s) for a Particular Task
65
+
66
+
67
+
64
68
  ## Action Menu
65
69
 
66
70
  *(Coming soon)*
@@ -32,6 +32,20 @@ def retrieve_bible_cross_references(request:str) -> str:
32
32
  messages = agentmake(request, **{'input_content_plugin': 'uba/every_single_ref', 'tool': 'uba/xref'}, **AGENTMAKE_CONFIG)
33
33
  return getResponse(messages)
34
34
 
35
+ @mcp.tool
36
+ def retrieve_pointed_hebrew_or_accented_greek_bible_verses(request:str) -> str:
37
+ """retrieve Hebrew (with pointed vowels) or Greek (with accents) Bible verses; bible verse reference(s) must be given"""
38
+ global agentmake, getResponse
39
+ messages = agentmake(request, **{'tool': 'uba/ohgb'}, **AGENTMAKE_CONFIG)
40
+ return getResponse(messages)
41
+
42
+ @mcp.tool
43
+ def retrieve_hebrew_or_greek_bible_verses(request:str) -> str:
44
+ """retrieve Hebrew (without pointed vowels) or Greek (without accents) Bible verses; bible verse reference(s) must be given"""
45
+ global agentmake, getResponse
46
+ messages = agentmake(request, **{'tool': 'uba/mob'}, **AGENTMAKE_CONFIG)
47
+ return getResponse(messages)
48
+
35
49
  @mcp.tool
36
50
  def retrieve_english_bible_verses(request:str) -> str:
37
51
  """retrieve English Bible verses; bible verse reference(s) must be given"""
biblemate/main.py CHANGED
@@ -55,6 +55,7 @@ async def main_async():
55
55
  available_tools = list(tools.keys())
56
56
  if not "get_direct_text_response" in available_tools:
57
57
  available_tools.insert(0, "get_direct_text_response")
58
+ available_tools_pattern = "|".join(available_tools)
58
59
 
59
60
  # add tool description for get_direct_text_response if not exists
60
61
  if not "get_direct_text_response" in tools:
@@ -126,13 +127,14 @@ Get a static text-based response directly from a text-based AI model without usi
126
127
  bar() # Update the bar
127
128
  await asyncio.sleep(0.01) # Yield control back to the event loop
128
129
  return task.result()
129
- async def process_step_async(step_number):
130
+ async def process_tool(tool, tool_instruction, step_number=None):
130
131
  """
131
132
  Manages the async task and the progress bar.
132
133
  """
133
- print(f"# Starting Step [{step_number}]...")
134
+ if step_number:
135
+ print(f"# Starting Step [{step_number}]...")
134
136
  # Create the async task but don't await it yet.
135
- task = asyncio.create_task(process_step())
137
+ task = asyncio.create_task(run_tool(tool, tool_instruction))
136
138
  # Await the custom async progress bar that awaits the task.
137
139
  await async_alive_bar(task)
138
140
 
@@ -173,9 +175,12 @@ Get a static text-based response directly from a text-based AI model without usi
173
175
  ".chat": "enable chat mode",
174
176
  ".agent": "enable agent mode",
175
177
  ".tools": "list available tools",
178
+ #".resources": "list available resources",
179
+ #".prompts": "list available prompts",
180
+ ".backup": "backup conversation",
176
181
  ".open": "open a file or directory",
177
182
  }
178
- input_suggestions = list(action_list.keys())+prompt_list
183
+ input_suggestions = list(action_list.keys())+[f"@{t} " for t in available_tools]+prompt_list
179
184
  user_request = await getInput("> ", input_suggestions)
180
185
  while not user_request.strip():
181
186
  user_request = await getInput("> ", input_suggestions)
@@ -190,7 +195,9 @@ Get a static text-based response directly from a text-based AI model without usi
190
195
 
191
196
  # TODO: ui - radio list menu
192
197
  if user_request in action_list:
193
- if user_request == ".tools":
198
+ if user_request == ".backup":
199
+ backup()
200
+ elif user_request == ".tools":
194
201
  console.rule()
195
202
  tools_descriptions = [f"- `{name}`: {description}" for name, description in tools.items()]
196
203
  console.print(Markdown("## Available Tools\n\n"+"\n".join(tools_descriptions)))
@@ -222,14 +229,68 @@ Get a static text-based response directly from a text-based AI model without usi
222
229
  console.print(get_banner())
223
230
  continue
224
231
 
225
- # auto prompt engineering
226
- async def run_prompt_engineering():
227
- nonlocal user_request
228
- user_request = agentmake(user_request, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
229
- await thinking(run_prompt_engineering, "Prompt Engineering ...")
232
+ # Check if a single tool is specified
233
+ specified_tool = ""
234
+ if re.search(f"""^@({available_tools_pattern}) """, user_request):
235
+ specified_tool = re.search(f"""^@({available_tools_pattern}) """, user_request).group(1)
236
+ user_request = user_request[len(specified_tool)+2:]
237
+ elif user_request.startswith("@@"):
238
+ specified_tool = "@@"
239
+ master_plan = user_request[2:].strip()
240
+ async def refine_custom_plan():
241
+ nonlocal messages, user_request, master_plan
242
+ # Prompt engineering
243
+ #master_plan = agentmake(messages if messages else master_plan, follow_up_prompt=master_plan if messages else None, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
244
+ # Summarize user request in one-sentence instruction
245
+ user_request = agentmake(master_plan, tool="biblemate/summarize_task_instruction", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[15:-4]
246
+ await thinking(refine_custom_plan)
247
+ # display info
248
+ console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
249
+
250
+ # Prompt Engineering
251
+ if not specified_tool == "@@":
252
+ async def run_prompt_engineering():
253
+ nonlocal user_request
254
+ user_request = agentmake(messages if messages else user_request, follow_up_prompt=user_request if messages else None, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
255
+ await thinking(run_prompt_engineering, "Prompt Engineering ...")
256
+
257
+ if not messages:
258
+ messages = [
259
+ {"role": "system", "content": "You are BibleMate, an autonomous AI agent."},
260
+ {"role": "user", "content": user_request},
261
+ ]
262
+ else:
263
+ messages.append({"role": "user", "content": user_request})
264
+
265
+ async def run_tool(tool, tool_instruction):
266
+ nonlocal messages
267
+ if tool == "get_direct_text_response":
268
+ messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
269
+ else:
270
+ try:
271
+ tool_schema = tools_schema[tool]
272
+ tool_properties = tool_schema["parameters"]["properties"]
273
+ if len(tool_properties) == 1 and "request" in tool_properties: # AgentMake MCP Servers or alike
274
+ tool_result = await client.call_tool(tool, {"request": tool_instruction})
275
+ else:
276
+ structured_output = getDictionaryOutput(messages=messages, schema=tool_schema)
277
+ tool_result = await client.call_tool(tool, structured_output)
278
+ tool_result = tool_result.content[0].text
279
+ messages[-1]["content"] += f"\n\n[Using tool `{tool}`]"
280
+ messages.append({"role": "assistant", "content": tool_result if tool_result.strip() else "Tool error!"})
281
+ except Exception as e:
282
+ if DEVELOPER_MODE:
283
+ console.print(f"Error: {e}\nFallback to direct response...\n\n")
284
+ messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
285
+
286
+ # user specify a single tool
287
+ if specified_tool and not specified_tool == "@@":
288
+ await process_tool(specified_tool, user_request)
289
+ console.print(Markdown(f"# User Request\n\n{messages[-2]['content']}\n\n# AI Response\n\n{messages[-1]['content']}"))
290
+ continue
230
291
 
231
292
  # Chat mode
232
- if not config.agent_mode:
293
+ if not config.agent_mode and not specified_tool == "@@":
233
294
  async def run_chat_mode():
234
295
  nonlocal messages, user_request
235
296
  messages = agentmake(messages if messages else user_request, system="auto", **AGENTMAKE_CONFIG)
@@ -237,30 +298,35 @@ Get a static text-based response directly from a text-based AI model without usi
237
298
  console.print(Markdown(f"# User Request\n\n{messages[-2]['content']}\n\n# AI Response\n\n{messages[-1]['content']}"))
238
299
  continue
239
300
 
240
- if re.search(prompt_pattern, user_request):
241
- prompt_name = re.search(prompt_pattern, user_request).group(1)
242
- user_request = user_request[len(prompt_name):]
243
- # Call the MCP prompt
244
- prompt_schema = prompts_schema[prompt_name[1:]]
245
- prompt_properties = prompt_schema["parameters"]["properties"]
246
- if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
247
- result = await client.get_prompt(prompt_name[1:], {"request": user_request})
301
+ # agent mode
302
+
303
+ # generate master plan
304
+ if not master_plan:
305
+ if re.search(prompt_pattern, user_request):
306
+ prompt_name = re.search(prompt_pattern, user_request).group(1)
307
+ user_request = user_request[len(prompt_name):]
308
+ # Call the MCP prompt
309
+ prompt_schema = prompts_schema[prompt_name[1:]]
310
+ prompt_properties = prompt_schema["parameters"]["properties"]
311
+ if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
312
+ result = await client.get_prompt(prompt_name[1:], {"request": user_request})
313
+ else:
314
+ structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
315
+ result = await client.get_prompt(prompt_name[1:], structured_output)
316
+ #print(result, "\n\n")
317
+ master_plan = result.messages[0].content.text
318
+ # display info
319
+ console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
320
+ console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
248
321
  else:
249
- structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
250
- result = await client.get_prompt(prompt_name[1:], structured_output)
251
- #print(result, "\n\n")
252
- master_plan = result.messages[0].content.text
253
- # display info
254
- console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
255
- else:
256
- # display info
257
- console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
258
- # Generate master plan
259
- master_plan = ""
260
- async def generate_master_plan():
261
- nonlocal master_plan
262
- # Create initial prompt to create master plan
263
- initial_prompt = f"""Provide me with the `Preliminary Action Plan` and the `Measurable Outcome` for resolving `My Request`.
322
+ # display info
323
+ console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
324
+ # Generate master plan
325
+ master_plan = ""
326
+ async def generate_master_plan():
327
+ nonlocal master_plan
328
+ # Create initial prompt to create master plan
329
+ initial_prompt = f"""Provide me with the `Preliminary Action Plan` and the `Measurable Outcome` for resolving `My Request`.
264
330
 
265
331
  # Available Tools
266
332
 
@@ -271,13 +337,14 @@ Available tools are: {available_tools}.
271
337
  # My Request
272
338
 
273
339
  {user_request}"""
274
- console.print(Markdown("# Master plan"), "\n")
275
- print()
276
- master_plan = agentmake(messages+[{"role": "user", "content": initial_prompt}], system="create_action_plan", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
277
- await thinking(generate_master_plan)
278
- # display info
279
- console.print(Markdown(master_plan), "\n\n")
280
-
340
+ console.print(Markdown("# Master plan"), "\n")
341
+ print()
342
+ master_plan = agentmake(messages+[{"role": "user", "content": initial_prompt}], system="create_action_plan", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
343
+ await thinking(generate_master_plan)
344
+ # display info
345
+ console.print(Markdown(master_plan), "\n\n")
346
+
347
+ # Step suggestion system message
281
348
  system_suggestion = get_system_suggestion(master_plan)
282
349
 
283
350
  # Tool selection systemm message
@@ -292,14 +359,6 @@ Available tools are: {available_tools}.
292
359
  await thinking(get_first_suggestion)
293
360
  console.print(Markdown(next_suggestion), "\n\n")
294
361
 
295
- if not messages:
296
- messages = [
297
- {"role": "system", "content": "You are BibleMate, an autonomous AI agent."},
298
- {"role": "user", "content": user_request},
299
- ]
300
- else:
301
- messages.append({"role": "user", "content": user_request})
302
-
303
362
  step = 1
304
363
  while not ("DONE" in next_suggestion or re.sub("^[^A-Za-z]*?([A-Za-z]+?)[^A-Za-z]*?$", r"\1", next_suggestion).upper() == "DONE"):
305
364
 
@@ -343,28 +402,7 @@ Available tools are: {available_tools}.
343
402
  messages.append({"role": "assistant", "content": "Please provide me with an initial instruction to begin."})
344
403
  messages.append({"role": "user", "content": next_step})
345
404
 
346
- async def process_step():
347
- nonlocal messages, next_tool, next_step
348
- if next_tool == "get_direct_text_response":
349
- messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
350
- else:
351
- try:
352
- tool_schema = tools_schema[next_tool]
353
- tool_properties = tool_schema["parameters"]["properties"]
354
- if len(tool_properties) == 1 and "request" in tool_properties: # AgentMake MCP Servers or alike
355
- tool_result = await client.call_tool(next_tool, {"request": next_step})
356
- else:
357
- structured_output = getDictionaryOutput(messages=messages, schema=tool_schema)
358
- tool_result = await client.call_tool(next_tool, structured_output)
359
- tool_result = tool_result.content[0].text
360
- messages[-1]["content"] += f"\n\n[Using tool `{next_tool}`]"
361
- messages.append({"role": "assistant", "content": tool_result if tool_result.strip() else "Done!"})
362
- except Exception as e:
363
- if DEVELOPER_MODE:
364
- console.print(f"Error: {e}\nFallback to direct response...\n\n")
365
- messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
366
- await process_step_async(step)
367
-
405
+ await process_tool(next_tool, next_step, step_number=step)
368
406
  console.print(Markdown(f"\n## Output [{step}]\n\n{messages[-1]["content"]}"))
369
407
 
370
408
  # iteration count
@@ -382,6 +420,9 @@ Available tools are: {available_tools}.
382
420
  await thinking(get_next_suggestion)
383
421
  #print()
384
422
  console.print(Markdown(next_suggestion), "\n")
423
+
424
+ if messages[-1].get("role") == "user":
425
+ messages.append({"role": "assistant", "content": next_suggestion})
385
426
 
386
427
  # Backup
387
428
  backup()
@@ -1,4 +1,4 @@
1
- agentmake>=1.0.67
1
+ agentmake>=1.0.70
2
2
  agentmakemcp>=0.0.8
3
3
  fastmcp[cli]
4
4
  rich
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: biblemate
3
- Version: 0.0.18
3
+ Version: 0.0.21
4
4
  Summary: BibleMate AI - Automate Your Bible Study
5
5
  Home-page: https://toolmate.ai
6
6
  Author: Eliran Wong
@@ -23,7 +23,7 @@ Classifier: Programming Language :: Python :: 3.10
23
23
  Classifier: Programming Language :: Python :: 3.11
24
24
  Classifier: Programming Language :: Python :: 3.12
25
25
  Requires-Python: >=3.8, <3.13
26
- Requires-Dist: agentmake >=1.0.67
26
+ Requires-Dist: agentmake >=1.0.70
27
27
  Requires-Dist: agentmakemcp >=0.0.8
28
28
  Requires-Dist: alive-progress
29
29
  Requires-Dist: fastmcp[cli]
@@ -94,6 +94,10 @@ How to swap?
94
94
  * Enter `.chat` in BibleMate AI prompt to enable chat mode and disable agent mode.
95
95
  * Enter `.agent` in BibleMate AI prompt to enable agent mode and disable chat mode.
96
96
 
97
+ ## Specify Tool(s) for a Particular Task
98
+
99
+
100
+
97
101
  ## Action Menu
98
102
 
99
103
  *(Coming soon)*
@@ -0,0 +1,15 @@
1
+ biblemate/README.md,sha256=f1SRN1aIFbtSQW4RFZxeUWGBcBma_JgCw4dRFEou05k,4148
2
+ biblemate/__init__.py,sha256=aFO4_EperOrwwDBdrkTKfDMt2Fh18Y0A2G_nUC_cmmM,78
3
+ biblemate/bible_study_mcp.py,sha256=lW4pFlfO-98w1yazOIgr2KyWccPFmrNmIReQUEo3--g,16575
4
+ biblemate/config.py,sha256=ktpLv_5qdbf3FErxUIdCvVc9MO6kQH4Zt_omoJ7msIs,15
5
+ biblemate/main.py,sha256=SWQ2edxSDGcOGvt3s6urUPAunzwqLKd1p1fPkLk-76c,22214
6
+ biblemate/package_name.txt,sha256=WkkuEEkgw7EKpXV8GshpzhZlwRor1wpotTS7vP24b_g,9
7
+ biblemate/requirements.txt,sha256=MliJX2PmogiVmgqHk4W0TMqRp2FLYXcGIkf8PS2RV94,70
8
+ biblemate/core/systems.py,sha256=nG_NgcLSRhdaHuxuCPN5ZfJUhP88kdfwhRCvRk4RLjI,1874
9
+ biblemate/ui/info.py,sha256=QRCno0CYUHVoOtVkZIxVamZONmtI7KRmOT2YoUagY5s,811
10
+ biblemate/ui/prompts.py,sha256=mxdC5BU7NMok9MOm1E39MHSrxB9gSRqGY7HsOc--rRg,3484
11
+ biblemate-0.0.21.dist-info/METADATA,sha256=RoKKy2efUl2rucC6AYdw4J-JIy2Gw5e1pjen6wCfYVE,5643
12
+ biblemate-0.0.21.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
13
+ biblemate-0.0.21.dist-info/entry_points.txt,sha256=tbEfTFr6LhPR1E_zP3CsPwJsmG-G4MCnJ3FcQEMiqo0,50
14
+ biblemate-0.0.21.dist-info/top_level.txt,sha256=pq9uX0tAS0bizZcZ5GW5zIoDLQBa-b5QDlDGsdHNgiU,10
15
+ biblemate-0.0.21.dist-info/RECORD,,
@@ -1,15 +0,0 @@
1
- biblemate/README.md,sha256=JKyibK8Hdu5yAZrUkIH164W7_0SOKLmChHAYzDQQtFU,4104
2
- biblemate/__init__.py,sha256=aFO4_EperOrwwDBdrkTKfDMt2Fh18Y0A2G_nUC_cmmM,78
3
- biblemate/bible_study_mcp.py,sha256=LwdxlyaXRZ2dkieW6gzH-3emKMAej9V72N3b6HgJXlQ,15865
4
- biblemate/config.py,sha256=ktpLv_5qdbf3FErxUIdCvVc9MO6kQH4Zt_omoJ7msIs,15
5
- biblemate/main.py,sha256=Y8vBYRWhAQS5kznDaVFBPnCwLdrAG6DhHtPGfG_5TeA,19568
6
- biblemate/package_name.txt,sha256=WkkuEEkgw7EKpXV8GshpzhZlwRor1wpotTS7vP24b_g,9
7
- biblemate/requirements.txt,sha256=t7SZC3PO49bYU6W9qiy6GI74H1Ulu3OIQVpPu88yjz8,70
8
- biblemate/core/systems.py,sha256=nG_NgcLSRhdaHuxuCPN5ZfJUhP88kdfwhRCvRk4RLjI,1874
9
- biblemate/ui/info.py,sha256=QRCno0CYUHVoOtVkZIxVamZONmtI7KRmOT2YoUagY5s,811
10
- biblemate/ui/prompts.py,sha256=mxdC5BU7NMok9MOm1E39MHSrxB9gSRqGY7HsOc--rRg,3484
11
- biblemate-0.0.18.dist-info/METADATA,sha256=sTFdfREqCsR2FfF3IPdn0PkdDyd7o-BUq_tOo3W1Bz0,5599
12
- biblemate-0.0.18.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
13
- biblemate-0.0.18.dist-info/entry_points.txt,sha256=tbEfTFr6LhPR1E_zP3CsPwJsmG-G4MCnJ3FcQEMiqo0,50
14
- biblemate-0.0.18.dist-info/top_level.txt,sha256=pq9uX0tAS0bizZcZ5GW5zIoDLQBa-b5QDlDGsdHNgiU,10
15
- biblemate-0.0.18.dist-info/RECORD,,