biblemate 0.0.22__tar.gz → 0.0.24__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: biblemate
3
- Version: 0.0.22
3
+ Version: 0.0.24
4
4
  Summary: BibleMate AI - Automate Your Bible Study
5
5
  Home-page: https://toolmate.ai
6
6
  Author: Eliran Wong
@@ -1,5 +1,6 @@
1
1
  import logging
2
2
  from fastmcp import FastMCP
3
+ from fastmcp.prompts.prompt import PromptMessage, TextContent
3
4
  from agentmake import agentmake
4
5
  from biblemate import AGENTMAKE_CONFIG
5
6
 
@@ -347,4 +348,22 @@ def write_bible_sermon(request:str) -> str:
347
348
  messages = agentmake(request, **{'instruction': 'bible/sermon', 'system': 'auto'}, **AGENTMAKE_CONFIG)
348
349
  return getResponse(messages)
349
350
 
351
+ @mcp.prompt
352
+ def simple_bible_study(request:str) -> PromptMessage:
353
+ """Perform a simple bible study task; bible reference(s) must be given"""
354
+ global PromptMessage, TextContent
355
+ prompt_text = f"""You are a bible study agent. You check the user request, under the `User Request` section, and resolve it with the following steps in order:
356
+ 1. Call tool 'retrieve_english_bible_verses' for Bible text,
357
+ 2. Call tool 'retrieve_bible_cross_references' for Bible cross-references,
358
+ 3. Call tool 'study_old_testament_themes' for studying old testament themes or 'study_new_testament_themes' for studying new testament themes, and
359
+ 4. Call tool 'write_bible_theology' to explain its theology.
360
+
361
+ # User Request
362
+
363
+ ---
364
+ {request}
365
+ ---
366
+ """
367
+ return PromptMessage(role="user", content=TextContent(type="text", text=prompt_text))
368
+
350
369
  mcp.run(show_banner=False)
@@ -0,0 +1,3 @@
1
+ agent_mode=True
2
+ prompt_engineering=True
3
+ max_steps=30
@@ -3,7 +3,7 @@ from biblemate.ui.prompts import getInput
3
3
  from biblemate.ui.info import get_banner
4
4
  from biblemate import config, AGENTMAKE_CONFIG
5
5
  from pathlib import Path
6
- import asyncio, re, os, subprocess
6
+ import asyncio, re, os, subprocess, click
7
7
  from alive_progress import alive_bar
8
8
  from fastmcp import Client
9
9
  from agentmake import agentmake, getOpenCommand, getDictionaryOutput, edit_configurations, writeTextFile, getCurrentDateTime, AGENTMAKE_USER_DIR, USER_OS, DEVELOPER_MODE
@@ -14,88 +14,119 @@ from rich.terminal_theme import MONOKAI
14
14
  if not USER_OS == "Windows":
15
15
  import readline # for better input experience
16
16
 
17
- # Client to interact with the built-in Bible Study MCP server
18
- client = Client(os.path.join(os.path.dirname(os.path.realpath(__file__)), "bible_study_mcp.py"))
19
-
20
- # TODO: place in config.py
21
- MAX_STEPS = 50
17
+ # The client that interacts with the Bible Study MCP server
18
+ builtin_mcp_server = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bible_study_mcp.py")
19
+ user_mcp_server = os.path.join(AGENTMAKE_USER_DIR, "biblemate", "bible_study_mcp.py") # The user path has the same basename as the built-in one; users may copy the built-in server settings to this location for customization.
20
+ client = Client(user_mcp_server if os.path.isfile(user_mcp_server) else builtin_mcp_server)
22
21
 
23
22
  def main():
24
23
  asyncio.run(main_async())
25
24
 
25
+ async def initialize_app(client):
26
+ """Initializes the application by fetching tools and prompts from the MCP server."""
27
+ await client.ping()
28
+
29
+ tools_raw = await client.list_tools()
30
+ tools = {t.name: t.description for t in tools_raw}
31
+ tools = dict(sorted(tools.items()))
32
+ tools_schema = {}
33
+ for t in tools_raw:
34
+ schema = {
35
+ "name": t.name,
36
+ "description": t.description,
37
+ "parameters": {
38
+ "type": "object",
39
+ "properties": t.inputSchema["properties"],
40
+ "required": t.inputSchema["required"],
41
+ },
42
+ }
43
+ tools_schema[t.name] = schema
44
+
45
+ available_tools = list(tools.keys())
46
+ if "get_direct_text_response" not in available_tools:
47
+ available_tools.insert(0, "get_direct_text_response")
48
+
49
+ tool_descriptions = ""
50
+ if "get_direct_text_response" not in tools:
51
+ tool_descriptions = """# TOOL DESCRIPTION: `get_direct_text_response`
52
+ Get a static text-based response directly from a text-based AI model without using any other tools. This is useful when you want to provide a simple and direct answer to a question or request, without the need for online latest updates or task execution.\n\n\n"""
53
+ for tool_name, tool_description in tools.items():
54
+ tool_descriptions += f"""# TOOL DESCRIPTION: `{tool_name}`
55
+ {tool_description}\n\n\n"""
56
+
57
+ prompts_raw = await client.list_prompts()
58
+ prompts = {p.name: p.description for p in prompts_raw}
59
+ prompts = dict(sorted(prompts.items()))
60
+
61
+ prompts_schema = {}
62
+ for p in prompts_raw:
63
+ arg_properties = {}
64
+ arg_required = []
65
+ for a in p.arguments:
66
+ arg_properties[a.name] = {
67
+ "type": "string",
68
+ "description": str(a.description) if a.description else "no description available",
69
+ }
70
+ if a.required:
71
+ arg_required.append(a.name)
72
+ schema = {
73
+ "name": p.name,
74
+ "description": p.description,
75
+ "parameters": {
76
+ "type": "object",
77
+ "properties": arg_properties,
78
+ "required": arg_required,
79
+ },
80
+ }
81
+ prompts_schema[p.name] = schema
82
+
83
+ return tools, tools_schema, available_tools, tool_descriptions, prompts, prompts_schema
84
+
85
+ def backup_conversation(console, messages, master_plan):
86
+ """Backs up the current conversation to the user's directory."""
87
+ timestamp = getCurrentDateTime()
88
+ storagePath = os.path.join(AGENTMAKE_USER_DIR, "biblemate", timestamp)
89
+ Path(storagePath).mkdir(parents=True, exist_ok=True)
90
+ # Save full conversation
91
+ conversation_file = os.path.join(storagePath, "conversation.py")
92
+ writeTextFile(conversation_file, str(messages))
93
+ # Save master plan
94
+ writeTextFile(os.path.join(storagePath, "master_plan.md"), master_plan)
95
+ # Save html
96
+ html_file = os.path.join(storagePath, "conversation.html")
97
+ console.save_html(html_file, inline_styles=True, theme=MONOKAI)
98
+ # Save markdown
99
+ console.save_text(os.path.join(storagePath, "conversation.md"))
100
+ # Inform users of the backup location
101
+ print(f"Conversation backup saved to {storagePath}")
102
+ print(f"Report saved to {html_file}\n")
103
+
104
+ def write_user_config():
105
+ """Writes the current configuration to the user's config file."""
106
+ user_config_dir = os.path.join(AGENTMAKE_USER_DIR, "biblemate")
107
+ Path(user_config_dir).mkdir(parents=True, exist_ok=True)
108
+ config_file = os.path.join(user_config_dir, "config.py")
109
+ configurations = f"""agent_mode={config.agent_mode}
110
+ prompt_engineering={config.prompt_engineering}
111
+ max_steps={config.max_steps}"""
112
+ writeTextFile(config_file, configurations)
113
+
26
114
  async def main_async():
27
115
 
116
+ APP_START = True
117
+ DEFAULT_SYSTEM = "You are BibleMate AI, an autonomous agent designed to assist users with their Bible study."
28
118
  console = Console(record=True)
29
119
  console.clear()
30
120
  console.print(get_banner())
31
121
 
32
122
  async with client:
33
- await client.ping()
34
-
35
- #resources = await client.list_resources()
36
- #print("# Resources\n\n", resources, "\n\n")
37
-
38
- # List available tools, resources, and prompts
39
- tools_raw = await client.list_tools()
40
- #print(tools_raw)
41
- tools = {t.name: t.description for t in tools_raw}
42
- tools = dict(sorted(tools.items()))
43
- tools_schema = {}
44
- for t in tools_raw:
45
- schema = {
46
- "name": t.name,
47
- "description": t.description,
48
- "parameters": {
49
- "type": "object",
50
- "properties": t.inputSchema["properties"],
51
- "required": t.inputSchema["required"],
52
- },
53
- }
54
- tools_schema[t.name] = schema
55
-
56
- available_tools = list(tools.keys())
57
- if not "get_direct_text_response" in available_tools:
58
- available_tools.insert(0, "get_direct_text_response")
123
+ tools, tools_schema, available_tools, tool_descriptions, prompts, prompts_schema = await initialize_app(client)
124
+
59
125
  available_tools_pattern = "|".join(available_tools)
60
-
61
- # add tool description for get_direct_text_response if not exists
62
- if not "get_direct_text_response" in tools:
63
- tool_descriptions = f"""# TOOL DESCRIPTION: `get_direct_text_response`
64
- Get a static text-based response directly from a text-based AI model without using any other tools. This is useful when you want to provide a simple and direct answer to a question or request, without the need for online latest updates or task execution.\n\n\n"""
65
- # add tool descriptions
66
- for tool_name, tool_description in tools.items():
67
- tool_descriptions += f"""# TOOL DESCRIPTION: `{tool_name}`
68
- {tool_description}\n\n\n"""
69
-
70
- prompts_raw = await client.list_prompts()
71
- #print("# Prompts\n\n", prompts_raw, "\n\n")
72
- prompts = {p.name: p.description for p in prompts_raw}
73
126
  prompt_list = [f"/{p}" for p in prompts.keys()]
74
127
  prompt_pattern = "|".join(prompt_list)
75
128
  prompt_pattern = f"""^({prompt_pattern}) """
76
129
 
77
- prompts_schema = {}
78
- for p in prompts_raw:
79
- arg_properties = {}
80
- arg_required = []
81
- for a in p.arguments:
82
- arg_properties[a.name] = {
83
- "type": "string",
84
- "description": str(a.description) if a.description else "no description available",
85
- }
86
- if a.required:
87
- arg_required.append(a.name)
88
- schema = {
89
- "name": p.name,
90
- "description": p.description,
91
- "parameters": {
92
- "type": "object",
93
- "properties": arg_properties,
94
- "required": arg_required,
95
- },
96
- }
97
- prompts_schema[p.name] = schema
98
-
99
130
  user_request = ""
100
131
  master_plan = ""
101
132
  messages = []
@@ -139,33 +170,24 @@ Get a static text-based response directly from a text-based AI model without usi
139
170
  # Await the custom async progress bar that awaits the task.
140
171
  await async_alive_bar(task)
141
172
 
142
- # backup
143
- def backup():
144
- nonlocal console, messages, master_plan
145
- timestamp = getCurrentDateTime()
146
- storagePath = os.path.join(AGENTMAKE_USER_DIR, "biblemate", timestamp)
147
- Path(storagePath).mkdir(parents=True, exist_ok=True)
148
- # Save full conversation
149
- conversation_file = os.path.join(storagePath, "conversation.py")
150
- writeTextFile(conversation_file, str(messages))
151
- # Save master plan
152
- writeTextFile(os.path.join(storagePath, "master_plan.md"), master_plan)
153
- # Save html
154
- html_file = os.path.join(storagePath, "conversation.html")
155
- console.save_html(html_file, inline_styles=True, theme=MONOKAI)
156
- # Save markdown
157
- console.save_text(os.path.join(storagePath, "conversation.md"))
158
- # Inform users of the backup location
159
- print(f"Conversation backup saved to {storagePath}")
160
- print(f"Report saved to {html_file}\n")
161
- def write_config():
162
- # TODO: support more configs
163
- config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.py")
164
- writeTextFile(config_file, f"agent_mode={config.agent_mode}")
165
-
166
173
  if messages:
167
174
  console.rule()
168
-
175
+ elif APP_START:
176
+ print()
177
+ APP_START = False
178
+ while True:
179
+ try:
180
+ agentmake("Hello!", system=DEFAULT_SYSTEM)
181
+ break
182
+ except Exception as e:
183
+ print("Connection failed! Please ensure that you have a stable internet connection and that my AI backend and model are properly configured.")
184
+ print("Visit https://github.com/eliranwong/agentmake#supported-backends for help about the backend configuration.\n")
185
+ if click.confirm("Do you want to configure my AI backend and model now?", default=True):
186
+ edit_configurations()
187
+ console.rule()
188
+ console.print("Restart to make the changes in the backend effective!", justify="center")
189
+ console.rule()
190
+ exit()
169
191
  # Original user request
170
192
  # note: `python3 -m rich.emoji` for checking emoji
171
193
  console.print("Enter your request :smiley: :" if not messages else "Enter a follow-up request :flexed_biceps: :")
@@ -176,14 +198,30 @@ Get a static text-based response directly from a text-based AI model without usi
176
198
  ".chat": "enable chat mode",
177
199
  ".agent": "enable agent mode",
178
200
  ".tools": "list available tools",
179
- #".resources": "list available resources",
180
- #".prompts": "list available prompts",
201
+ ".plans": "list available plans",
202
+ #".resources": "list available resources", # TODO explore relevant usage for this project
203
+ ".promptengineering": "toggle auto prompt engineering",
204
+ ".steps": "configure the maximum number of steps allowed",
181
205
  ".backup": "backup conversation",
182
206
  ".open": "open a file or directory",
207
+ ".help": "help page",
183
208
  }
184
209
  input_suggestions = list(action_list.keys())+[f"@{t} " for t in available_tools]+prompt_list
185
210
  user_request = await getInput("> ", input_suggestions)
186
211
  while not user_request.strip():
212
+ # Generate ideas for `prompts to try`
213
+ ideas = ""
214
+ async def generate_ideas():
215
+ nonlocal ideas
216
+ if not messages:
217
+ ideas = agentmake("Generate three `prompts to try` for bible study. Each one should be one sentence long.", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
218
+ else:
219
+ ideas = agentmake(messages, follow_up_prompt="Generate three follow-up questions according to the on-going conversation.", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
220
+ await thinking(generate_ideas, "Generating ideas ...")
221
+ console.rule()
222
+ console.print(Markdown(f"## Ideas\n\n{ideas}\n\n"))
223
+ console.rule()
224
+ # Get input again
187
225
  user_request = await getInput("> ", input_suggestions)
188
226
 
189
227
  # system command
@@ -194,34 +232,58 @@ Get a static text-based response directly from a text-based AI model without usi
194
232
  subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
195
233
  continue
196
234
 
197
- # TODO: ui - radio list menu
235
+ # predefined operations with `.` commands
198
236
  if user_request in action_list:
199
237
  if user_request == ".backup":
200
- backup()
238
+ backup_conversation(console, messages, master_plan)
239
+ elif user_request == ".help":
240
+ console.rule()
241
+ console.print(Markdown("Visit https://github.com/eliranwong/biblemate for help."))
242
+ console.rule()
201
243
  elif user_request == ".tools":
202
244
  console.rule()
203
245
  tools_descriptions = [f"- `{name}`: {description}" for name, description in tools.items()]
204
246
  console.print(Markdown("## Available Tools\n\n"+"\n".join(tools_descriptions)))
205
247
  console.rule()
248
+ elif user_request == ".plans":
249
+ console.rule()
250
+ prompts_descriptions = [f"- `{name}`: {description}" for name, description in prompts.items()]
251
+ console.print(Markdown("## Available Plans\n\n"+"\n".join(prompts_descriptions)))
252
+ console.rule()
206
253
  elif user_request == ".backend":
207
254
  edit_configurations()
208
255
  console.rule()
209
256
  console.print("Restart to make the changes in the backend effective!", justify="center")
210
257
  console.rule()
258
+ elif user_request == ".steps":
259
+ console.rule()
260
+ console.print("Enter the maximum number of steps allowed below:")
261
+ max_steps = await getInput("> ", number_validator=True)
262
+ if max_steps:
263
+ config.max_steps = int(max_steps)
264
+ write_user_config()
265
+ console.print("Maximum number of steps set to", config.max_steps, "steps.", justify="center")
266
+ console.rule()
267
+ elif user_request == ".promptengineering":
268
+ config.prompt_engineering = not config.prompt_engineering
269
+ write_user_config()
270
+ console.rule()
271
+ console.print("Prompt Engineering Enabled" if config.prompt_engineering else "Prompt Engineering Disabled", justify="center")
272
+ console.rule()
211
273
  elif user_request == ".chat":
212
274
  config.agent_mode = False
213
- write_config()
275
+ write_user_config()
214
276
  console.rule()
215
277
  console.print("Chat Mode Enabled", justify="center")
216
278
  console.rule()
217
279
  elif user_request == ".agent":
218
280
  config.agent_mode = True
219
- write_config()
281
+ write_user_config()
220
282
  console.rule()
221
283
  console.print("Agent Mode Enabled", justify="center")
222
284
  console.rule()
223
285
  elif user_request in (".new", ".quit"):
224
- backup() # backup
286
+ backup_conversation(console, messages, master_plan) # backup
225
287
  # reset
226
288
  if user_request == ".new":
227
289
  user_request = ""
@@ -231,8 +293,12 @@ Get a static text-based response directly from a text-based AI model without usi
231
293
  continue
232
294
 
233
295
  # Check if a single tool is specified
296
+ specified_prompt = ""
234
297
  specified_tool = ""
235
- if re.search(f"""^@({available_tools_pattern}) """, user_request):
298
+ if re.search(prompt_pattern, user_request):
299
+ specified_prompt = re.search(prompt_pattern, user_request).group(1)
300
+ user_request = user_request[len(specified_prompt):]
301
+ elif re.search(f"""^@({available_tools_pattern}) """, user_request):
236
302
  specified_tool = re.search(f"""^@({available_tools_pattern}) """, user_request).group(1)
237
303
  user_request = user_request[len(specified_tool)+2:]
238
304
  elif user_request.startswith("@@"):
@@ -249,7 +315,7 @@ Get a static text-based response directly from a text-based AI model without usi
249
315
  console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
250
316
 
251
317
  # Prompt Engineering
252
- if not specified_tool == "@@":
318
+ if not specified_tool == "@@" and config.prompt_engineering:
253
319
  async def run_prompt_engineering():
254
320
  nonlocal user_request
255
321
  user_request = agentmake(messages if messages else user_request, follow_up_prompt=user_request if messages else None, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
@@ -257,7 +323,7 @@ Get a static text-based response directly from a text-based AI model without usi
257
323
 
258
324
  if not messages:
259
325
  messages = [
260
- {"role": "system", "content": "You are BibleMate, an autonomous AI agent."},
326
+ {"role": "system", "content": DEFAULT_SYSTEM},
261
327
  {"role": "user", "content": user_request},
262
328
  ]
263
329
  else:
@@ -303,22 +369,19 @@ Get a static text-based response directly from a text-based AI model without usi
303
369
 
304
370
  # generate master plan
305
371
  if not master_plan:
306
- if re.search(prompt_pattern, user_request):
307
- prompt_name = re.search(prompt_pattern, user_request).group(1)
308
- user_request = user_request[len(prompt_name):]
372
+ if specified_prompt:
309
373
  # Call the MCP prompt
310
- prompt_schema = prompts_schema[prompt_name[1:]]
374
+ prompt_schema = prompts_schema[specified_prompt[1:]]
311
375
  prompt_properties = prompt_schema["parameters"]["properties"]
312
376
  if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
313
- result = await client.get_prompt(prompt_name[1:], {"request": user_request})
377
+ result = await client.get_prompt(specified_prompt[1:], {"request": user_request})
314
378
  else:
315
379
  structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
316
- result = await client.get_prompt(prompt_name[1:], structured_output)
380
+ result = await client.get_prompt(specified_prompt[1:], structured_output)
317
381
  #print(result, "\n\n")
318
382
  master_plan = result.messages[0].content.text
319
383
  # display info
320
384
  console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
321
- console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
322
385
  else:
323
386
  # display info
324
387
  console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
@@ -408,9 +471,10 @@ Available tools are: {available_tools}.
408
471
 
409
472
  # iteration count
410
473
  step += 1
411
- if step > MAX_STEPS:
412
- print("Stopped! Too many steps! `MAX_STEPS` is currently set to ", MAX_STEPS, "!")
413
- print("You can increase it in the settings, but be careful not to create an infinite loop!")
474
+ if step > config.max_steps:
475
+ console.rule()
476
+ console.print("Stopped! Too many steps! The maximum steps is currently set to", config.max_steps, "steps. Enter `.steps` to configure.")
477
+ console.rule()
414
478
  break
415
479
 
416
480
  # Get the next suggestion
@@ -426,7 +490,7 @@ Available tools are: {available_tools}.
426
490
  messages.append({"role": "assistant", "content": next_suggestion})
427
491
 
428
492
  # Backup
429
- backup()
493
+ backup_conversation(console, messages, master_plan)
430
494
 
431
495
  if __name__ == "__main__":
432
496
  asyncio.run(main())
@@ -2,4 +2,5 @@ agentmake>=1.0.70
2
2
  agentmakemcp>=0.0.8
3
3
  fastmcp[cli]
4
4
  rich
5
- alive-progress
5
+ alive-progress
6
+ click
@@ -1,9 +1,26 @@
1
1
  from agentmake.main import AGENTMAKE_USER_DIR
2
2
  from agentmake.utils.system import getCliOutput
3
- import os, shutil
3
+ from prompt_toolkit.validation import Validator, ValidationError
4
+ from biblemate import config
5
+ import os, shutil, re
4
6
 
5
7
 
6
- async def getInput(prompt:str="Instruction: ", input_suggestions:list=None):
8
+ class NumberValidator(Validator):
9
+ def validate(self, document):
10
+ text = document.text
11
+
12
+ if text and not re.search("^[0-9]+?$", text):
13
+ i = 0
14
+
15
+ # Get index of first non numeric character.
16
+ # We want to move the cursor here.
17
+ for i, c in enumerate(text):
18
+ if not c.isdigit():
19
+ break
20
+
21
+ raise ValidationError(message='This entry accepts numbers only!', cursor_position=i)
22
+
23
+ async def getInput(prompt:str="Instruction: ", input_suggestions:list=None, number_validator:bool=False):
7
24
  """
8
25
  Prompt for user input
9
26
  """
@@ -92,6 +109,8 @@ async def getInput(prompt:str="Instruction: ", input_suggestions:list=None):
92
109
  bottom_toolbar="[ENTER] submit [TAB] linebreak [Ctrl+N] new [Ctrl+Q] quit",
93
110
  completer=completer,
94
111
  key_bindings=bindings,
112
+ validator=NumberValidator() if number_validator else None,
113
+ default=str(config.max_steps) if number_validator else "",
95
114
  )
96
115
  print()
97
116
  return instruction.strip() if instruction else ""
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: biblemate
3
- Version: 0.0.22
3
+ Version: 0.0.24
4
4
  Summary: BibleMate AI - Automate Your Bible Study
5
5
  Home-page: https://toolmate.ai
6
6
  Author: Eliran Wong
@@ -1,6 +1,7 @@
1
1
  agentmake>=1.0.70
2
2
  agentmakemcp>=0.0.8
3
3
  alive-progress
4
+ click
4
5
  fastmcp[cli]
5
6
  rich
6
7
 
@@ -27,7 +27,7 @@ with open(os.path.join(package, "requirements.txt"), "r") as fileObj:
27
27
  # https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/
28
28
  setup(
29
29
  name=package,
30
- version="0.0.22",
30
+ version="0.0.24",
31
31
  python_requires=">=3.8, <3.13",
32
32
  description=f"BibleMate AI - Automate Your Bible Study",
33
33
  long_description=long_description,
@@ -1 +0,0 @@
1
- agent_mode=True
File without changes