bibleagent 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: bibleagent
3
+ Version: 1.0.0
4
+ Summary: BibleMate AI - Automate Your Bible Study; A headless version of BibleMate AI Agent Mode
5
+ Home-page: https://biblemate.ai
6
+ Author: Eliran Wong
7
+ Author-email: support@marvel.bible
8
+ License: GNU General Public License (GPL)
9
+ Project-URL: Source, https://github.com/eliranwong/biblemateagent
10
+ Project-URL: Tracker, https://github.com/eliranwong/biblemateagent/issues
11
+ Project-URL: Documentation, https://github.com/eliranwong/biblemateagent/wiki
12
+ Project-URL: Funding, https://www.paypal.me/MarvelBible
13
+ Keywords: mcp agent biblemate ai anthropic azure chatgpt cohere deepseek genai github googleai groq llamacpp mistral ollama openai vertexai xai
14
+ Classifier: Development Status :: 5 - Production/Stable
15
+ Classifier: Intended Audience :: End Users/Desktop
16
+ Classifier: Topic :: Utilities
17
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
18
+ Classifier: Topic :: Software Development :: Build Tools
19
+ Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Requires-Python: >=3.10, <3.13
24
+ Requires-Dist: biblemateweb>=0.3.56
25
+ Provides-Extra: genai
26
+ Requires-Dist: google-genai>=1.46.0; extra == "genai"
27
+ Dynamic: author
28
+ Dynamic: author-email
29
+ Dynamic: classifier
30
+ Dynamic: description
31
+ Dynamic: home-page
32
+ Dynamic: keywords
33
+ Dynamic: license
34
+ Dynamic: project-url
35
+ Dynamic: provides-extra
36
+ Dynamic: requires-dist
37
+ Dynamic: requires-python
38
+ Dynamic: summary
39
+
40
+ # biblemateagent
41
+ A headless version of BibleMate AI Agent Mode
42
+
43
+ ## Installation
44
+
45
+ > pip install bibleagent
46
+
47
+ ## Set up data
48
+
49
+ > biblematedata
50
+
51
+ ## Run BibleMate Agent
52
+
53
+ > biblemateagent "Your Bible Study Request"
54
+
55
+ or
56
+
57
+ > bibleagent "Your Bible Study Request"
58
+
59
+ ## Help
60
+
61
+ > biblemateagent -h
62
+
63
+ or
64
+
65
+ > bibleagent -h
@@ -0,0 +1,26 @@
1
+ # biblemateagent
2
+ A headless version of BibleMate AI Agent Mode
3
+
4
+ ## Installation
5
+
6
+ > pip install biblemateagent
7
+
8
+ ## Set up data
9
+
10
+ > biblematedata
11
+
12
+ ## Run BibleMate Agent
13
+
14
+ > biblemateagent "Your Bible Study Request"
15
+
16
+ or
17
+
18
+ > bibleagent "Your Bible Study Request"
19
+
20
+ ## Help
21
+
22
+ > biblemateagent -h
23
+
24
+ or
25
+
26
+ > bibleagent -h
@@ -0,0 +1,26 @@
1
+ # biblemateagent
2
+ A headless version of BibleMate AI Agent Mode
3
+
4
+ ## Installation
5
+
6
+ > pip install biblemateagent
7
+
8
+ ## Set up data
9
+
10
+ > biblematedata
11
+
12
+ ## Run BibleMate Agent
13
+
14
+ > biblemateagent "Your Bible Study Request"
15
+
16
+ or
17
+
18
+ > bibleagent "Your Bible Study Request"
19
+
20
+ ## Help
21
+
22
+ > biblemateagent -h
23
+
24
+ or
25
+
26
+ > bibleagent -h
@@ -0,0 +1,292 @@
1
import ast
import asyncio
import os
import re
import shutil
import traceback
from copy import deepcopy

import pypandoc

from agentmake import agentmake, readTextFile, getCurrentDateTime, DEFAULT_AI_BACKEND
from biblemate.core.systems import get_system_tool_instruction, get_system_master_plan, get_system_make_suggestion, get_system_progress, get_system_generate_title
from biblemateweb import BIBLEMATEWEB_APP_DIR, get_translation, DEFAULT_MESSAGES, chapter2verses
from biblemateweb.api.api import get_api_content
from biblemateweb.mcp_tools.elements import TOOL_ELEMENTS
from biblemateweb.mcp_tools.tool_descriptions import TOOL_DESCRIPTIONS
from biblemateweb.mcp_tools.tools import TOOLS

from bibleagent.stream import stream_output
17
+
18
def do_export(content, filename, md_export=True, docx_export=False, output_directory=None):
    """Write `content` to markdown and/or docx files named after `filename`.

    Args:
        content: markdown text to export.
        filename: base name for the output file(s); sanitized before use
            (runs of characters other than ASCII alphanumerics, underscores
            and CJK ideographs collapse to a single underscore).
        md_export: write `<filename>.md` when True.
        docx_export: convert content to `<filename>.docx` via pandoc when True.
        output_directory: target directory, created if missing; defaults to
            the current working directory.
    """
    # sanitize filename, but accept chinese characters
    filename = re.sub(r"[^a-zA-Z0-9_\u4e00-\u9fff]+", "_", filename)

    # ROBUSTNESS FIX: the old `isdir` check crashed on a None directory and
    # raced with concurrent creation; exist_ok covers both.
    if not output_directory:
        output_directory = os.getcwd()
    os.makedirs(output_directory, exist_ok=True)

    if md_export:
        # BUG FIX: the sanitized filename was previously ignored — every
        # export wrote to the same placeholder path and overwrote it.
        md_filepath = os.path.join(output_directory, f"{filename}.md")
        with open(md_filepath, "w", encoding="utf-8") as f:
            f.write(content)
        print(f"Exported to: {md_filepath}")

    if docx_export:
        if not shutil.which("pandoc"):
            print("Pandoc is not installed. Skipping DOCX export.")
            return
        try:
            docx_filepath = os.path.join(output_directory, f"{filename}.docx")
            pypandoc.convert_text(content, 'docx', format='md', outputfile=docx_filepath)
            print(f"Exported to: {docx_filepath}")
        except ImportError:
            print("pypandoc not installed. Skipping DOCX export.")
        except Exception as e:
            print(f"Error exporting to DOCX: {e}")
44
+
45
async def bible_agent(
    request="",
    language="eng",
    improve_prompt=False,
    md_export=False,
    docx_export=False,
    output_directory="",
    developer=False,
    cancel_event=None,
    **kwargs
):
    """Run the headless BibleMate agent loop for a single user request.

    The loop alternates suggestion -> tool selection -> tool instruction ->
    tool execution -> progress check until the progress model says STOP,
    then writes a final report.

    Args:
        request: the user's Bible-study request (required, non-blank).
        language: content language code ("eng", "tc" or "sc").
        improve_prompt: rewrite the request with an LLM before planning.
        md_export: export each round's output as markdown files.
        docx_export: export each round's output as docx files (needs pandoc).
        output_directory: base directory for exports; defaults to CWD. A
            timestamped study subdirectory is created beneath it.
        developer: print tracebacks when a tool raises.
        cancel_event: optional event object; when set, the loop aborts.
        **kwargs: backend/model/API options forwarded to the LLM calls.

    Returns:
        The accumulated message list on success, or None on failure/cancel.
    """

    if not output_directory:
        output_directory = os.getcwd()

    MESSAGES = None
    MASTER_PLAN = None
    PROGRESS_STATUS = None
    ROUND = 1
    MASTER_USER_REQUEST = request

    SYSTEM_TOOL_SELECTION = readTextFile(os.path.join(BIBLEMATEWEB_APP_DIR, "mcp_tools", "system_tool_selection_lite.md"))

    TOOL_INSTRUCTION_PROMPT = """Please transform the following suggestions into clear, precise, and actionable instructions."""
    # TYPO FIX in prompt text: "insturctions" -> "instructions"
    TOOL_INSTRUCTION_SUFFIX = """

# Remember

* Provide me with the instructions directly.
* Do not start your response, like, 'Here are the instructions ...'
* Do not ask me if I want to execute the instruction."""

    MASTER_PLAN_PROMPT_TEMPLATE = """Provide me with the `Preliminary Action Plan` and the `Measurable Outcome` for resolving `My Request`.

# Available Tools

Available tools are: {available_tools}.

{tool_descriptions}

# My Request

{user_request}"""

    FINAL_INSTRUCTION = """# Instruction
Please provide a comprehensive response that resolves my original request, ensuring all previously completed milestones and data points are fully integrated.

# Original Request
"""

    if not MASTER_USER_REQUEST or not MASTER_USER_REQUEST.strip():
        print("Please provide a request.")
        return None

    # Generate a short study title so exports land in a recognizable folder.
    timestamp = getCurrentDateTime()
    generated_title = ""
    generated_title_output = agentmake(
        MASTER_USER_REQUEST,
        system=get_system_generate_title(),
        backend=kwargs.get("backend", DEFAULT_AI_BACKEND),
        model=kwargs.get("model", None),
        api_key=kwargs.get("api_key", None),
        api_endpoint=kwargs.get("api_endpoint", None),
        max_tokens=kwargs.get("max_tokens", None),
        context_window=kwargs.get("context_window", None),
        temperature=kwargs.get("temperature", None),
        print_on_terminal=False,
    )
    if generated_title_output:
        generated_title = generated_title_output[-1].get("content", "").strip().replace("Title: ", "")
        print(f"\n[TITLE] {generated_title}\n")
        study_directory = f"{timestamp}_{generated_title}"
        output_directory = os.path.join(output_directory, study_directory)
    else:
        study_directory = f"{timestamp}_bible_study"
        output_directory = os.path.join(output_directory, study_directory)

    print(f"\n[REQUEST] {MASTER_USER_REQUEST}\n")

    MESSAGES = deepcopy(DEFAULT_MESSAGES)

    if improve_prompt:
        print("\n--- Improving Prompt ---\n")
        user_request = await stream_output(
            MESSAGES, MASTER_USER_REQUEST, cancel_event, system="improve_prompt_2", **kwargs
        )
        if user_request and user_request.strip() != "[NO_CONTENT]":
            if "```" in user_request:
                # extract the fenced improved prompt, if the model fenced it
                MASTER_USER_REQUEST = re.sub(r"^.*?(```improved_prompt|```)(.+?)```.*?$", r"\2", user_request, flags=re.DOTALL).strip()
            else:
                MASTER_USER_REQUEST = user_request.strip()
            print(f"\n[IMPROVED REQUEST] {MASTER_USER_REQUEST}\n")

    MESSAGES += [
        {"role": "user", "content": MASTER_USER_REQUEST},
        {"role": "assistant", "content": "Let's begin!"},
    ]

    # Generate master plan
    print("\n--- Generating Study Plan ---\n")
    master_plan_prompt = MASTER_PLAN_PROMPT_TEMPLATE.format(
        available_tools=list(TOOLS.keys()),
        tool_descriptions=TOOL_DESCRIPTIONS,
        user_request=MASTER_USER_REQUEST
    )

    MASTER_PLAN = await stream_output(
        MESSAGES, master_plan_prompt, cancel_event, system=get_system_master_plan(), **kwargs
    )

    if not MASTER_PLAN or MASTER_PLAN.strip() == "[NO_CONTENT]":
        print("Failed to generate master plan.")
        return None

    if md_export or docx_export:
        filename = "00_request_and_master_plan"
        content = f"# Title\n\n{generated_title}\n\n" if generated_title else ""
        content += f"# Request\n\n{MASTER_USER_REQUEST}\n\n# Master Plan\n\n{MASTER_PLAN}"
        do_export(content, filename, md_export, docx_export, output_directory)

    PROGRESS_STATUS = "START"
    print("\n--- Finished generating study plan. Beginning rounds. ---\n")

    try:
        # Loop until the progress model answers STOP (either literally
        # contained in the reply, or as the reply's only word).
        while PROGRESS_STATUS is None or not ("STOP" in PROGRESS_STATUS or re.sub("^[^A-Za-z]*?([A-Za-z]+?)[^A-Za-z]*?$", r"\1", PROGRESS_STATUS).upper() == "STOP"):
            if cancel_event and cancel_event.is_set():
                break

            print(f"\n### Round {ROUND} ###\n")

            # suggestion
            print("\n--- Suggestion ---\n")
            system_make_suggestion = get_system_make_suggestion(master_plan=MASTER_PLAN)
            follow_up_prompt = "Please provide me with the next step suggestion, based on the action plan."

            suggestion = await stream_output(MESSAGES, follow_up_prompt, cancel_event, system=system_make_suggestion, **kwargs)

            if not suggestion or suggestion.strip() == "[NO_CONTENT]":
                print("\n[No suggestion generated. Stopping.]\n")
                break

            # tool selection
            # NOTE(review): DEFAULT_MESSAGES is a shared module-level list; if
            # stream_output/agentmake ever mutates it, rounds could leak into
            # each other — confirm upstream behavior.
            print("\n--- Tool Selection ---\n")
            suggested_tools = await stream_output(DEFAULT_MESSAGES, suggestion, cancel_event, system=SYSTEM_TOOL_SELECTION, **kwargs)

            if not suggested_tools or suggested_tools.strip() == "[NO_CONTENT]":
                suggested_tools_list = ["get_direct_text_response"]
            else:
                try:
                    suggested_tools_str = re.sub(r"^.*?(\[.*?\]).*?$", r"\1", suggested_tools, flags=re.DOTALL)
                    # SECURITY FIX: parse the model-produced list with
                    # ast.literal_eval instead of eval() — the text comes from
                    # an LLM and must never be executed as code.
                    suggested_tools_list = ast.literal_eval(suggested_tools_str.replace("`", "'")) if suggested_tools_str.startswith("[") and suggested_tools_str.endswith("]") else ["get_direct_text_response"]
                except Exception:
                    suggested_tools_list = ["get_direct_text_response"]

            selected_tool = suggested_tools_list[0] if suggested_tools_list else "get_direct_text_response"
            if selected_tool not in TOOLS:
                selected_tool = "get_direct_text_response"

            print(f"\n[Selected Tool: {selected_tool}]\n")

            # tool instruction
            print("\n--- Tool Instruction ---\n")
            selected_tool_description = TOOLS.get(selected_tool, "No description available.")
            tool_instruction_draft = TOOL_INSTRUCTION_PROMPT + "\n\n# Suggestions\n\n"+suggestion+f"\n\n# Tool Description of `{selected_tool}`\n\n"+selected_tool_description+TOOL_INSTRUCTION_SUFFIX
            system_tool_instruction = get_system_tool_instruction(selected_tool, selected_tool_description)

            user_request = await stream_output(MESSAGES, tool_instruction_draft, cancel_event, system=system_tool_instruction, **kwargs)

            if not user_request or user_request.strip() == "[NO_CONTENT]":
                print("\n[No tool instruction generated. Stopping.]\n")
                break

            print("\n--- Agent Execution ---\n")
            answers = None
            try:
                if selected_tool == "get_direct_text_response":
                    answers = await stream_output(MESSAGES, user_request, cancel_event, system="auto", **kwargs)
                else:
                    element = TOOL_ELEMENTS.get(selected_tool)
                    if isinstance(element, str):
                        # string element: an API query prefix
                        print(f"Loading {selected_tool}...")
                        if selected_tool != "search_the_whole_bible":
                            user_req = chapter2verses(user_request)
                        else:
                            user_req = user_request
                        api_query = f"{element}{user_req}"
                        answers = await asyncio.to_thread(get_api_content, api_query, language)
                        if answers:
                            print(answers)
                        else:
                            answers = "[NO_CONTENT]"
                            print(answers)
                    elif isinstance(element, dict):
                        # BUG FIX: copy the dict before popping — the old
                        # element.pop("system") mutated the shared
                        # TOOL_ELEMENTS entry, stripping "system" for every
                        # later round and session.
                        element_kwargs = dict(element)
                        system = element_kwargs.pop("system", None)
                        answers = await stream_output(MESSAGES, user_request, cancel_event, system=system, **element_kwargs, **kwargs)

            except Exception as e:
                answers = f"[{get_translation('Error')}: {str(e)}]"
                print(f"\n{answers}\n")
                if developer:
                    traceback.print_exc()

            # NOTE(review): the "[Error:" prefix only matches when
            # get_translation('Error') == "Error" (English); for tc/sc the
            # error marker differs and failed rounds get recorded — confirm.
            if answers and not (answers.strip() == "[NO_CONTENT]" or answers.startswith("[Error:")):
                if md_export or docx_export:
                    round_str = f"{ROUND:02}"
                    filename = f"{round_str}_{selected_tool}"
                    do_export(answers, filename, md_export, docx_export, output_directory)
                MESSAGES += [
                    {"role": "user", "content": f"[ROUND {ROUND}]\n\n{user_request}"},
                    {"role": "assistant", "content": f"[TOOL] {selected_tool}\n\n[RESPONSE]\n\n{answers}"},
                ]

            # check progress
            print("\n--- Progress Check ---\n")
            system_progress = get_system_progress(master_plan=MASTER_PLAN)
            follow_up_prompt = "Please decide either to `CONTINUE` or `STOP` the process."
            PROGRESS_STATUS = await stream_output(MESSAGES, follow_up_prompt, cancel_event, system=system_progress, **kwargs)

            if not PROGRESS_STATUS or PROGRESS_STATUS.strip() == "[NO_CONTENT]":
                # roll back the round's messages when progress check failed
                MESSAGES = MESSAGES[:-2]
                break

            ROUND += 1

        print("\n---\n")
        print("\nWrapping up...\n")
        print("\n--- Final Report ---\n")
        system_report = "write_final_answer"
        follow_up_prompt = f"""{FINAL_INSTRUCTION}{MASTER_USER_REQUEST}"""
        report = await stream_output(MESSAGES, follow_up_prompt, cancel_event, system=system_report, **kwargs)

        if report and report.strip() != "[NO_CONTENT]":
            if md_export or docx_export:
                round_str = f"{ROUND:02}"
                filename = f"{round_str}_final_report"
                do_export(report, filename, md_export, docx_export, output_directory)
            MESSAGES += [
                {"role": "user", "content": "[FINAL] Please provide a comprehensive response that resolves my original request, ensuring all previously completed milestones and data points are fully integrated."},
                {"role": "assistant", "content": f"[REPORT]\n\n{report}"},
            ]
        print("\n--- Finished successfully ---\n")

        return MESSAGES

    except Exception as e:
        print(f"\nError: {e}\n")
        # traceback.print_exc()
        return None
File without changes
@@ -0,0 +1,49 @@
1
import asyncio, argparse
from agentmake import SUPPORTED_AI_BACKENDS, DEFAULT_AI_BACKEND
from bibleagent.agent import bible_agent

def _parse_args():
    """Build the CLI parser and parse sys.argv.

    Parsing is deferred into a function (it previously ran at module import
    time) so importing this module has no side effects.
    """
    parser = argparse.ArgumentParser(description="BibleMate AI Agent")
    parser.add_argument("default", nargs="*", default=None, help="user request")
    parser.add_argument("-l", "--language", action="store", dest="language", choices=["eng", "tc", "sc"], help="language option")
    parser.add_argument("-b", "--backend", action="store", dest="backend", choices=SUPPORTED_AI_BACKENDS, help="AI backend option")
    parser.add_argument("-m", "--model", action="store", dest="model", help="AI model option")
    parser.add_argument("-k", "--api_key", action="store", dest="api_key", help="API key option")
    parser.add_argument("-e", "--api_endpoint", action="store", dest="api_endpoint", help="API endpoint option")
    parser.add_argument("-mt", "--max_tokens", action="store", dest="max_tokens", type=int, help="max tokens option")
    parser.add_argument("-cw", "--context_window", action="store", dest="context_window", type=int, help="context window option")
    parser.add_argument("-t", "--temperature", action="store", dest="temperature", type=float, help="temperature option")
    parser.add_argument("-p", "--improve_prompt", action="store_true", dest="improve_prompt", help="improve user prompt")
    parser.add_argument("-d", "--developer", action="store_true", dest="developer", help="developer mode")
    parser.add_argument("-md", "--md_export", action="store_true", dest="md_export", help="export outputs in markdown format")
    parser.add_argument("-docx", "--docx_export", action="store_true", dest="docx_export", help="export outputs in docx format")
    parser.add_argument("-o", "--output_directory", action="store", dest="output_directory", help="output directory")
    return parser.parse_args()

async def main_async():
    """Parse the CLI options and run the agent once."""
    args = _parse_args()
    print("Starting BibleMate AI Agent...")
    # argparse already yields None for unset store options, so no
    # `x if x else None` dance is needed for most of them
    kwargs = {
        "backend": args.backend if args.backend else DEFAULT_AI_BACKEND,
        "model": args.model,
        "api_key": args.api_key,
        "api_endpoint": args.api_endpoint,
        "max_tokens": args.max_tokens,
        "context_window": args.context_window,
        "temperature": args.temperature,
    }

    await bible_agent(
        # BUG FIX: with no positional argument, args.default is the declared
        # default (None) and " ".join(None) raised TypeError
        request=" ".join(args.default) if args.default else "",
        language=args.language if args.language else "eng",
        improve_prompt=bool(args.improve_prompt),
        md_export=bool(args.md_export),
        docx_export=bool(args.docx_export),
        output_directory=args.output_directory if args.output_directory else "",
        developer=bool(args.developer),
        **kwargs,
    )
    print("Finished")

def main():
    """Console-script entry point (bibleagent / biblemateagent / bmagent)."""
    asyncio.run(main_async())

if __name__ == "__main__":
    main()
@@ -0,0 +1 @@
1
+ bibleagent
@@ -0,0 +1 @@
1
+ biblemateweb>=0.3.56
@@ -0,0 +1,122 @@
1
+ import asyncio
2
+ from agentmake import agentmake, DEFAULT_AI_BACKEND, unpack_instruction_content, unpack_system_content
3
+ from agentmake.utils.text_wrapper import get_stream_event_text
4
+ from agentmake.utils.read_assistant_response import is_openai_style
5
+ from biblemateweb import DEFAULT_MESSAGES
6
+
7
async def stream_output(messages, user_request, cancel_event=None, system=None, **kwargs):
    """Stream an LLM completion to the terminal and return the full text.

    Args:
        messages: conversation history handed to agentmake.
        user_request: the follow-up prompt to send.
        cancel_event: optional event; when set, streaming stops early and
            None is returned.
        system: system prompt selector. "auto" generates a task-specific
            system prompt via a recursive call; None leaves the backend
            default; any other value is unpacked and embedded into the user
            request instead of being sent as a real system message.
        **kwargs: backend/model/API options; may also carry "instruction",
            which is unpacked and prefixed onto the user request.

    Returns:
        The accumulated streamed text (with some unicode normalized), or
        None on cancellation or empty "auto" system generation.
    """
    def get_next_chunk(iterator):
        """
        Runs in a separate thread.
        Returns the next item, or None if the iterator is exhausted.
        """
        try:
            return next(iterator)
        except StopIteration:
            return None
        except Exception as e:
            return e # Return the error to be handled in the main loop

    if "instruction" in kwargs:
        instruction_content = kwargs.pop("instruction")
        instruction_content = unpack_instruction_content(instruction_content)
        # refine user request
        user_request = instruction_content + "\n" + user_request

    if system == "auto":
        # generate a bespoke system prompt; the recursive call uses the
        # "bible/create_agent" system, which itself takes the elif path below
        system = await stream_output(DEFAULT_MESSAGES, user_request, cancel_event, system="bible/create_agent", **kwargs)
        if not system or system.strip() == "[NO_CONTENT]":
            return None
        else:
            # refine response: re-insert newlines and strip a ```agent fence
            system = system.replace("should:", "should:\n")
            system = system.replace("examples:", "examples:\n")
            if system.startswith("```agent\n"):
                system = system[9:]
            if system.endswith("```"):
                system = system[:-3].strip()
    elif system is not None:
        # embed the named system prompt inside the user request and send no
        # real system message (system becomes None below)
        system_content = unpack_system_content(system)
        system = None
        # refine user request
        user_request = f"""---

START OF YOUR NEW ROLE

---

{system_content}

---

START OF MY REQUEST

---

{user_request}"""

    # Print loading text
    print("Loading...", end="\r", flush=True)
    await asyncio.sleep(0)

    # pop the routing options so they are not duplicated via **kwargs below
    backend=kwargs.pop("backend", DEFAULT_AI_BACKEND)
    model=kwargs.pop("model", None)
    api_key=kwargs.pop("api_key", None)
    api_endpoint=kwargs.pop("api_endpoint", None)
    max_tokens=kwargs.pop("max_tokens", None)
    context_window=kwargs.pop("context_window", None)
    temperature=kwargs.pop("temperature", None)

    # run completion
    text_chunks = ""
    # agentmake blocks, so it is started in a worker thread; with
    # stream_events_only=True it returns an event iterator
    completion = await asyncio.to_thread(
        agentmake,
        messages,
        system=system,
        backend=backend,
        model=model,
        api_key=api_key,
        api_endpoint=api_endpoint,
        max_tokens=max_tokens,
        context_window=context_window,
        temperature=temperature,
        follow_up_prompt=user_request,
        stream=True,
        print_on_terminal=False,
        stream_events_only=True,
        **kwargs,
    )

    print("Running... ", end="\r", flush=True)

    try:
        # pull chunks off the blocking iterator in a worker thread so the
        # event loop stays responsive and cancellation is observed per chunk
        while cancel_event is None or not cancel_event.is_set():
            event = await asyncio.to_thread(get_next_chunk, completion)

            if event is None:
                break
            elif isinstance(event, Exception):
                print(f"\nStream interrupted: {str(event)}")
                break

            actual_backend = backend if backend else DEFAULT_AI_BACKEND
            if text_chunk := get_stream_event_text(event, openai_style=is_openai_style(actual_backend)):
                text_chunks += text_chunk
                print(text_chunk, end="", flush=True)
            # yield control to the event loop between chunks
            await asyncio.sleep(0)

        if cancel_event is not None and cancel_event.is_set():
            print("\n[Cancelled!]")
        else:
            print() # Print newline when done

    except Exception as e:
        print(f"\n[Error: {str(e)}]")
        if cancel_event is not None:
            cancel_event.set()

    if cancel_event is not None and cancel_event.is_set():
        # NOTE(review): rebinding the local here does not clear the caller's
        # event object — this line looks like a no-op; confirm intent
        cancel_event = None
        return None

    # presumably normalizes non-breaking spaces/hyphens emitted by some
    # models into plain ASCII — TODO confirm the exact source characters
    return text_chunks.replace(" ", " ").replace("‑", "-")
@@ -0,0 +1 @@
1
+ 1.0.0
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: bibleagent
3
+ Version: 1.0.0
4
+ Summary: BibleMate AI - Automate Your Bible Study; A headless version of BibleMate AI Agent Mode
5
+ Home-page: https://biblemate.ai
6
+ Author: Eliran Wong
7
+ Author-email: support@marvel.bible
8
+ License: GNU General Public License (GPL)
9
+ Project-URL: Source, https://github.com/eliranwong/biblemateagent
10
+ Project-URL: Tracker, https://github.com/eliranwong/biblemateagent/issues
11
+ Project-URL: Documentation, https://github.com/eliranwong/biblemateagent/wiki
12
+ Project-URL: Funding, https://www.paypal.me/MarvelBible
13
+ Keywords: mcp agent biblemate ai anthropic azure chatgpt cohere deepseek genai github googleai groq llamacpp mistral ollama openai vertexai xai
14
+ Classifier: Development Status :: 5 - Production/Stable
15
+ Classifier: Intended Audience :: End Users/Desktop
16
+ Classifier: Topic :: Utilities
17
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
18
+ Classifier: Topic :: Software Development :: Build Tools
19
+ Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Requires-Python: >=3.10, <3.13
24
+ Requires-Dist: biblemateweb>=0.3.56
25
+ Provides-Extra: genai
26
+ Requires-Dist: google-genai>=1.46.0; extra == "genai"
27
+ Dynamic: author
28
+ Dynamic: author-email
29
+ Dynamic: classifier
30
+ Dynamic: description
31
+ Dynamic: home-page
32
+ Dynamic: keywords
33
+ Dynamic: license
34
+ Dynamic: project-url
35
+ Dynamic: provides-extra
36
+ Dynamic: requires-dist
37
+ Dynamic: requires-python
38
+ Dynamic: summary
39
+
40
+ # biblemateagent
41
+ A headless version of BibleMate AI Agent Mode
42
+
43
+ ## Installation
44
+
45
+ > pip install biblemateagent
46
+
47
+ ## Set up data
48
+
49
+ > biblematedata
50
+
51
+ ## Run BibleMate Agent
52
+
53
+ > biblemateagent "Your Bible Study Request"
54
+
55
+ or
56
+
57
+ > bibleagent "Your Bible Study Request"
58
+
59
+ ## Help
60
+
61
+ > biblemateagent -h
62
+
63
+ or
64
+
65
+ > bibleagent -h
@@ -0,0 +1,16 @@
1
+ README.md
2
+ setup.py
3
+ bibleagent/README.md
4
+ bibleagent/agent.py
5
+ bibleagent/config.py
6
+ bibleagent/main.py
7
+ bibleagent/package_name.txt
8
+ bibleagent/requirements.txt
9
+ bibleagent/stream.py
10
+ bibleagent/version.txt
11
+ bibleagent.egg-info/PKG-INFO
12
+ bibleagent.egg-info/SOURCES.txt
13
+ bibleagent.egg-info/dependency_links.txt
14
+ bibleagent.egg-info/entry_points.txt
15
+ bibleagent.egg-info/requires.txt
16
+ bibleagent.egg-info/top_level.txt
@@ -0,0 +1,4 @@
1
+ [console_scripts]
2
+ bibleagent = bibleagent.main:main
3
+ biblemateagent = bibleagent.main:main
4
+ bmagent = bibleagent.main:main
@@ -0,0 +1,4 @@
1
+ biblemateweb>=0.3.56
2
+
3
+ [genai]
4
+ google-genai>=1.46.0
@@ -0,0 +1 @@
1
+ bibleagent
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,96 @@
1
from setuptools import setup
from setuptools.command.install import install
import os, shutil, platform, sys

# Single source of truth for the release version; written into the package
# so it can be read at runtime.
version = "1.0.0"
with open(os.path.join("bibleagent", "version.txt"), "w", encoding="utf-8") as fileObj:
    fileObj.write(version)

# package name
package_name_0 = "package_name.txt"
with open(package_name_0, "r", encoding="utf-8") as fileObj:
    package = fileObj.read()
package_name_1 = os.path.join(package, "package_name.txt") # copy of the package-name file inside the package
shutil.copy(package_name_0, package_name_1)

# update package readme
latest_readme = os.path.join("README.md") # github repository readme
package_readme = os.path.join(package, "README.md") # package readme
shutil.copy(latest_readme, package_readme)
with open(package_readme, "r", encoding="utf-8") as fileObj:
    long_description = fileObj.read()

# get required packages
install_requires = []
# CONSISTENCY FIX: read requirements with an explicit encoding like every
# other open() in this script, so the build is locale-independent.
with open(os.path.join(package, "requirements.txt"), "r", encoding="utf-8") as fileObj:
    for line in fileObj.readlines():
        mod = line.strip()
        if mod:
            install_requires.append(mod)

# make sure config.py is empty
#open(os.path.join(package, "config.py"), "w").close()

# https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/
setup(
    name=package,
    version=version,
    python_requires=">=3.10, <3.13",
    description="BibleMate AI - Automate Your Bible Study; A headless version of BibleMate AI Agent Mode",
    long_description=long_description,
    author="Eliran Wong",
    author_email="support@marvel.bible",
    packages=[
        package,
    ],
    package_data={
        package: ["*.*"],
    },
    license="GNU General Public License (GPL)",
    install_requires=install_requires,
    extras_require={
        'genai': ["google-genai>=1.46.0"], # Dependencies for running Vertex AI
    },
    entry_points={
        "console_scripts": [
            f"bmagent={package}.main:main",
            f"biblemateagent={package}.main:main",
            f"bibleagent={package}.main:main",
        ],
    },
    keywords="mcp agent biblemate ai anthropic azure chatgpt cohere deepseek genai github googleai groq llamacpp mistral ollama openai vertexai xai",
    url="https://biblemate.ai",
    project_urls={
        "Source": "https://github.com/eliranwong/biblemateagent",
        "Tracker": "https://github.com/eliranwong/biblemateagent/issues",
        "Documentation": "https://github.com/eliranwong/biblemateagent/wiki",
        "Funding": "https://www.paypal.me/MarvelBible",
    },
    classifiers=[
        # Reference: https://pypi.org/classifiers/

        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Topic :: Utilities',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Build Tools',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        #'Programming Language :: Python :: 3.8',
        #'Programming Language :: Python :: 3.9',
        # currently, fastmcp supports 3.10-3.12
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: 3.12',
    ],
)