biblemate 0.0.20__py3-none-any.whl → 0.0.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
biblemate/README.md CHANGED
@@ -61,17 +61,63 @@ How to swap?
61
61
  * Enter `.chat` in BibleMate AI prompt to enable chat mode and disable agent mode.
62
62
  * Enter `.agent` in BibleMate AI prompt to enable agent mode and disable chat mode.
63
63
 
64
- ## Action Menu
64
+ ## Manual Tool Selection
65
+
66
+ In some cases, you may want to specify a particular tool for a simple task, rather than having a tool automatically selected in the fully automatic `agent mode`.
67
+
68
+ You can specify a single tool by prefixing a tool name with `@` at the beginning of your prompt. For example,
69
+
70
+ ```
71
+ @retrieve_bible_cross_references Deut 6:4; John 3:16
72
+ ```
73
+
74
+ Watch this video: https://youtu.be/50m1KRj6uhs
65
75
 
66
- *(Coming soon)*
76
+ ## Custom Master Plan with Multiple Tools
67
77
 
68
- ## Keyboard Shortcut
78
+ In some cases, you may want to specify a `custom plan` with multiple tools specified for different steps for a complex task, rather than having a `master plan` automatically generated in fully automatic agent mode.
69
79
 
70
- *(Coming soon)*
80
+ You can use a custom 'Master Plan' of your own, instead of one generated by BibleMate AI. To do this, start your BibleMate AI prompt with '@@' followed by your own master plan for a Bible study. For example,
71
81
 
72
- ## Customization
82
+ ```
83
+ @@ Analyze John 3:16 with the following steps:
84
+ 1. Call tool 'retrieve_english_bible_verses' for Bible text,
85
+ 2. Call tool 'retrieve_bible_cross_references' for Bible cross-references,
86
+ 3. Call tool 'interpret_new_testament_verse' for interpretation, and
87
+ 4. Call tool 'write_bible_theology' to explain its theology.
88
+ ```
89
+
90
+ Watch this video: https://youtu.be/Lejq0sAx030
91
+
92
+ The '@@' trick works even when you are using 'chat' mode with 'agent' mode disabled.
93
+
94
+ ## Action Menu
73
95
 
74
- *(Coming soon)*
96
+ There is a set of predefined entries that start with a dot sign `.`:
97
+
98
+ - `.new` - new conversation
99
+ - `.quit` - quit
100
+ - `.backend` - change backend
101
+ - `.chat` - enable chat mode
102
+ - `.agent` - enable agent mode
103
+ - `.tools` - list available tools
104
+ - `.backup` - backup conversation
105
+ - `.open` - open a file or directory, e.g. `.open /home/user/report.html`
106
+
107
+ ## Keyboard Shortcuts
108
+
109
+ The following key bindings are supported in the BibleMate AI prompt field:
110
+
111
+ - `Ctrl+N` new conversation
112
+ - `Ctrl+Q` quit
113
+ - `Ctrl+C` copy selected prompt text
114
+ - `Ctrl+V` paste text in a prompt
115
+ - `Ctrl+I` or `TAB` new line
116
+ - `Ctrl+Z` clear prompt text
117
+ - `Esc+a` jump to the beginning of a prompt
118
+ - `Esc+z` jump to the end of a prompt
119
+ - `Esc+b` or `HOME` jump to the beginning of a line in a prompt
120
+ - `Esc+e` or `END` jump to the end of a line in a prompt
75
121
 
76
122
  ## License
77
123
 
biblemate/main.py CHANGED
@@ -39,6 +39,7 @@ async def main_async():
39
39
  tools_raw = await client.list_tools()
40
40
  #print(tools_raw)
41
41
  tools = {t.name: t.description for t in tools_raw}
42
+ tools = dict(sorted(tools.items()))
42
43
  tools_schema = {}
43
44
  for t in tools_raw:
44
45
  schema = {
@@ -55,6 +56,7 @@ async def main_async():
55
56
  available_tools = list(tools.keys())
56
57
  if not "get_direct_text_response" in available_tools:
57
58
  available_tools.insert(0, "get_direct_text_response")
59
+ available_tools_pattern = "|".join(available_tools)
58
60
 
59
61
  # add tool description for get_direct_text_response if not exists
60
62
  if not "get_direct_text_response" in tools:
@@ -126,13 +128,14 @@ Get a static text-based response directly from a text-based AI model without usi
126
128
  bar() # Update the bar
127
129
  await asyncio.sleep(0.01) # Yield control back to the event loop
128
130
  return task.result()
129
- async def process_step_async(step_number):
131
+ async def process_tool(tool, tool_instruction, step_number=None):
130
132
  """
131
133
  Manages the async task and the progress bar.
132
134
  """
133
- print(f"# Starting Step [{step_number}]...")
135
+ if step_number:
136
+ print(f"# Starting Step [{step_number}]...")
134
137
  # Create the async task but don't await it yet.
135
- task = asyncio.create_task(process_step())
138
+ task = asyncio.create_task(run_tool(tool, tool_instruction))
136
139
  # Await the custom async progress bar that awaits the task.
137
140
  await async_alive_bar(task)
138
141
 
@@ -173,9 +176,12 @@ Get a static text-based response directly from a text-based AI model without usi
173
176
  ".chat": "enable chat mode",
174
177
  ".agent": "enable agent mode",
175
178
  ".tools": "list available tools",
179
+ #".resources": "list available resources",
180
+ #".prompts": "list available prompts",
181
+ ".backup": "backup conversation",
176
182
  ".open": "open a file or directory",
177
183
  }
178
- input_suggestions = list(action_list.keys())+prompt_list
184
+ input_suggestions = list(action_list.keys())+[f"@{t} " for t in available_tools]+prompt_list
179
185
  user_request = await getInput("> ", input_suggestions)
180
186
  while not user_request.strip():
181
187
  user_request = await getInput("> ", input_suggestions)
@@ -190,7 +196,9 @@ Get a static text-based response directly from a text-based AI model without usi
190
196
 
191
197
  # TODO: ui - radio list menu
192
198
  if user_request in action_list:
193
- if user_request == ".tools":
199
+ if user_request == ".backup":
200
+ backup()
201
+ elif user_request == ".tools":
194
202
  console.rule()
195
203
  tools_descriptions = [f"- `{name}`: {description}" for name, description in tools.items()]
196
204
  console.print(Markdown("## Available Tools\n\n"+"\n".join(tools_descriptions)))
@@ -222,14 +230,68 @@ Get a static text-based response directly from a text-based AI model without usi
222
230
  console.print(get_banner())
223
231
  continue
224
232
 
225
- # auto prompt engineering
226
- async def run_prompt_engineering():
227
- nonlocal user_request
228
- user_request = agentmake(user_request, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
229
- await thinking(run_prompt_engineering, "Prompt Engineering ...")
233
+ # Check if a single tool is specified
234
+ specified_tool = ""
235
+ if re.search(f"""^@({available_tools_pattern}) """, user_request):
236
+ specified_tool = re.search(f"""^@({available_tools_pattern}) """, user_request).group(1)
237
+ user_request = user_request[len(specified_tool)+2:]
238
+ elif user_request.startswith("@@"):
239
+ specified_tool = "@@"
240
+ master_plan = user_request[2:].strip()
241
+ async def refine_custom_plan():
242
+ nonlocal messages, user_request, master_plan
243
+ # Prompt engineering
244
+ #master_plan = agentmake(messages if messages else master_plan, follow_up_prompt=master_plan if messages else None, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
245
+ # Summarize user request in one-sentence instruction
246
+ user_request = agentmake(master_plan, tool="biblemate/summarize_task_instruction", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[15:-4]
247
+ await thinking(refine_custom_plan)
248
+ # display info
249
+ console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
250
+
251
+ # Prompt Engineering
252
+ if not specified_tool == "@@":
253
+ async def run_prompt_engineering():
254
+ nonlocal user_request
255
+ user_request = agentmake(messages if messages else user_request, follow_up_prompt=user_request if messages else None, tool="improve_prompt", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()[20:-4]
256
+ await thinking(run_prompt_engineering, "Prompt Engineering ...")
257
+
258
+ if not messages:
259
+ messages = [
260
+ {"role": "system", "content": "You are BibleMate, an autonomous AI agent."},
261
+ {"role": "user", "content": user_request},
262
+ ]
263
+ else:
264
+ messages.append({"role": "user", "content": user_request})
265
+
266
+ async def run_tool(tool, tool_instruction):
267
+ nonlocal messages
268
+ if tool == "get_direct_text_response":
269
+ messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
270
+ else:
271
+ try:
272
+ tool_schema = tools_schema[tool]
273
+ tool_properties = tool_schema["parameters"]["properties"]
274
+ if len(tool_properties) == 1 and "request" in tool_properties: # AgentMake MCP Servers or alike
275
+ tool_result = await client.call_tool(tool, {"request": tool_instruction})
276
+ else:
277
+ structured_output = getDictionaryOutput(messages=messages, schema=tool_schema)
278
+ tool_result = await client.call_tool(tool, structured_output)
279
+ tool_result = tool_result.content[0].text
280
+ messages[-1]["content"] += f"\n\n[Using tool `{tool}`]"
281
+ messages.append({"role": "assistant", "content": tool_result if tool_result.strip() else "Tool error!"})
282
+ except Exception as e:
283
+ if DEVELOPER_MODE:
284
+ console.print(f"Error: {e}\nFallback to direct response...\n\n")
285
+ messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
286
+
287
+ # user specify a single tool
288
+ if specified_tool and not specified_tool == "@@":
289
+ await process_tool(specified_tool, user_request)
290
+ console.print(Markdown(f"# User Request\n\n{messages[-2]['content']}\n\n# AI Response\n\n{messages[-1]['content']}"))
291
+ continue
230
292
 
231
293
  # Chat mode
232
- if not config.agent_mode:
294
+ if not config.agent_mode and not specified_tool == "@@":
233
295
  async def run_chat_mode():
234
296
  nonlocal messages, user_request
235
297
  messages = agentmake(messages if messages else user_request, system="auto", **AGENTMAKE_CONFIG)
@@ -237,30 +299,35 @@ Get a static text-based response directly from a text-based AI model without usi
237
299
  console.print(Markdown(f"# User Request\n\n{messages[-2]['content']}\n\n# AI Response\n\n{messages[-1]['content']}"))
238
300
  continue
239
301
 
240
- if re.search(prompt_pattern, user_request):
241
- prompt_name = re.search(prompt_pattern, user_request).group(1)
242
- user_request = user_request[len(prompt_name):]
243
- # Call the MCP prompt
244
- prompt_schema = prompts_schema[prompt_name[1:]]
245
- prompt_properties = prompt_schema["parameters"]["properties"]
246
- if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
247
- result = await client.get_prompt(prompt_name[1:], {"request": user_request})
302
+ # agent mode
303
+
304
+ # generate master plan
305
+ if not master_plan:
306
+ if re.search(prompt_pattern, user_request):
307
+ prompt_name = re.search(prompt_pattern, user_request).group(1)
308
+ user_request = user_request[len(prompt_name):]
309
+ # Call the MCP prompt
310
+ prompt_schema = prompts_schema[prompt_name[1:]]
311
+ prompt_properties = prompt_schema["parameters"]["properties"]
312
+ if len(prompt_properties) == 1 and "request" in prompt_properties: # AgentMake MCP Servers or alike
313
+ result = await client.get_prompt(prompt_name[1:], {"request": user_request})
314
+ else:
315
+ structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
316
+ result = await client.get_prompt(prompt_name[1:], structured_output)
317
+ #print(result, "\n\n")
318
+ master_plan = result.messages[0].content.text
319
+ # display info# display info
320
+ console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
321
+ console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
248
322
  else:
249
- structured_output = getDictionaryOutput(messages=messages, schema=prompt_schema)
250
- result = await client.get_prompt(prompt_name[1:], structured_output)
251
- #print(result, "\n\n")
252
- master_plan = result.messages[0].content.text
253
- # display info
254
- console.print(Markdown(f"# User Request\n\n{user_request}\n\n# Master plan\n\n{master_plan}"))
255
- else:
256
- # display info
257
- console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
258
- # Generate master plan
259
- master_plan = ""
260
- async def generate_master_plan():
261
- nonlocal master_plan
262
- # Create initial prompt to create master plan
263
- initial_prompt = f"""Provide me with the `Preliminary Action Plan` and the `Measurable Outcome` for resolving `My Request`.
323
+ # display info
324
+ console.print(Markdown(f"# User Request\n\n{user_request}"), "\n")
325
+ # Generate master plan
326
+ master_plan = ""
327
+ async def generate_master_plan():
328
+ nonlocal master_plan
329
+ # Create initial prompt to create master plan
330
+ initial_prompt = f"""Provide me with the `Preliminary Action Plan` and the `Measurable Outcome` for resolving `My Request`.
264
331
 
265
332
  # Available Tools
266
333
 
@@ -271,13 +338,14 @@ Available tools are: {available_tools}.
271
338
  # My Request
272
339
 
273
340
  {user_request}"""
274
- console.print(Markdown("# Master plan"), "\n")
275
- print()
276
- master_plan = agentmake(messages+[{"role": "user", "content": initial_prompt}], system="create_action_plan", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
277
- await thinking(generate_master_plan)
278
- # display info
279
- console.print(Markdown(master_plan), "\n\n")
280
-
341
+ console.print(Markdown("# Master plan"), "\n")
342
+ print()
343
+ master_plan = agentmake(messages+[{"role": "user", "content": initial_prompt}], system="create_action_plan", **AGENTMAKE_CONFIG)[-1].get("content", "").strip()
344
+ await thinking(generate_master_plan)
345
+ # display info
346
+ console.print(Markdown(master_plan), "\n\n")
347
+
348
+ # Step suggestion system message
281
349
  system_suggestion = get_system_suggestion(master_plan)
282
350
 
283
351
  # Tool selection systemm message
@@ -292,14 +360,6 @@ Available tools are: {available_tools}.
292
360
  await thinking(get_first_suggestion)
293
361
  console.print(Markdown(next_suggestion), "\n\n")
294
362
 
295
- if not messages:
296
- messages = [
297
- {"role": "system", "content": "You are BibleMate, an autonomous AI agent."},
298
- {"role": "user", "content": user_request},
299
- ]
300
- else:
301
- messages.append({"role": "user", "content": user_request})
302
-
303
363
  step = 1
304
364
  while not ("DONE" in next_suggestion or re.sub("^[^A-Za-z]*?([A-Za-z]+?)[^A-Za-z]*?$", r"\1", next_suggestion).upper() == "DONE"):
305
365
 
@@ -343,28 +403,7 @@ Available tools are: {available_tools}.
343
403
  messages.append({"role": "assistant", "content": "Please provide me with an initial instruction to begin."})
344
404
  messages.append({"role": "user", "content": next_step})
345
405
 
346
- async def process_step():
347
- nonlocal messages, next_tool, next_step
348
- if next_tool == "get_direct_text_response":
349
- messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
350
- else:
351
- try:
352
- tool_schema = tools_schema[next_tool]
353
- tool_properties = tool_schema["parameters"]["properties"]
354
- if len(tool_properties) == 1 and "request" in tool_properties: # AgentMake MCP Servers or alike
355
- tool_result = await client.call_tool(next_tool, {"request": next_step})
356
- else:
357
- structured_output = getDictionaryOutput(messages=messages, schema=tool_schema)
358
- tool_result = await client.call_tool(next_tool, structured_output)
359
- tool_result = tool_result.content[0].text
360
- messages[-1]["content"] += f"\n\n[Using tool `{next_tool}`]"
361
- messages.append({"role": "assistant", "content": tool_result if tool_result.strip() else "Done!"})
362
- except Exception as e:
363
- if DEVELOPER_MODE:
364
- console.print(f"Error: {e}\nFallback to direct response...\n\n")
365
- messages = agentmake(messages, system="auto", **AGENTMAKE_CONFIG)
366
- await process_step_async(step)
367
-
406
+ await process_tool(next_tool, next_step, step_number=step)
368
407
  console.print(Markdown(f"\n## Output [{step}]\n\n{messages[-1]["content"]}"))
369
408
 
370
409
  # iteration count
@@ -382,6 +421,9 @@ Available tools are: {available_tools}.
382
421
  await thinking(get_next_suggestion)
383
422
  #print()
384
423
  console.print(Markdown(next_suggestion), "\n")
424
+
425
+ if messages[-1].get("role") == "user":
426
+ messages.append({"role": "assistant", "content": next_suggestion})
385
427
 
386
428
  # Backup
387
429
  backup()
@@ -1,4 +1,4 @@
1
- agentmake>=1.0.69
1
+ agentmake>=1.0.70
2
2
  agentmakemcp>=0.0.8
3
3
  fastmcp[cli]
4
4
  rich
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: biblemate
3
- Version: 0.0.20
3
+ Version: 0.0.22
4
4
  Summary: BibleMate AI - Automate Your Bible Study
5
5
  Home-page: https://toolmate.ai
6
6
  Author: Eliran Wong
@@ -23,7 +23,7 @@ Classifier: Programming Language :: Python :: 3.10
23
23
  Classifier: Programming Language :: Python :: 3.11
24
24
  Classifier: Programming Language :: Python :: 3.12
25
25
  Requires-Python: >=3.8, <3.13
26
- Requires-Dist: agentmake >=1.0.69
26
+ Requires-Dist: agentmake >=1.0.70
27
27
  Requires-Dist: agentmakemcp >=0.0.8
28
28
  Requires-Dist: alive-progress
29
29
  Requires-Dist: fastmcp[cli]
@@ -94,17 +94,63 @@ How to swap?
94
94
  * Enter `.chat` in BibleMate AI prompt to enable chat mode and disable agent mode.
95
95
  * Enter `.agent` in BibleMate AI prompt to enable agent mode and disable chat mode.
96
96
 
97
- ## Action Menu
97
+ ## Manual Tool Selection
98
+
99
+ In some cases, you may want to specify a particular tool for a simple task, rather than having a tool automatically selected in the fully automatic `agent mode`.
100
+
101
+ You can specify a single tool by prefixing a tool name with `@` at the beginning of your prompt. For example,
102
+
103
+ ```
104
+ @retrieve_bible_cross_references Deut 6:4; John 3:16
105
+ ```
106
+
107
+ Watch this video: https://youtu.be/50m1KRj6uhs
98
108
 
99
- *(Coming soon)*
109
+ ## Custom Master Plan with Multiple Tools
100
110
 
101
- ## Keyboard Shortcut
111
+ In some cases, you may want to specify a `custom plan` with multiple tools specified for different steps for a complex task, rather than having a `master plan` automatically generated in fully automatic agent mode.
102
112
 
103
- *(Coming soon)*
113
+ You can use a custom 'Master Plan' of your own, instead of one generated by BibleMate AI. To do this, start your BibleMate AI prompt with '@@' followed by your own master plan for a Bible study. For example,
104
114
 
105
- ## Customization
115
+ ```
116
+ @@ Analyze John 3:16 with the following steps:
117
+ 1. Call tool 'retrieve_english_bible_verses' for Bible text,
118
+ 2. Call tool 'retrieve_bible_cross_references' for Bible cross-references,
119
+ 3. Call tool 'interpret_new_testament_verse' for interpretation, and
120
+ 4. Call tool 'write_bible_theology' to explain its theology.
121
+ ```
122
+
123
+ Watch this video: https://youtu.be/Lejq0sAx030
124
+
125
+ The '@@' trick works even when you are using 'chat' mode with 'agent' mode disabled.
126
+
127
+ ## Action Menu
106
128
 
107
- *(Coming soon)*
129
+ There is a set of predefined entries that start with a dot sign `.`:
130
+
131
+ - `.new` - new conversation
132
+ - `.quit` - quit
133
+ - `.backend` - change backend
134
+ - `.chat` - enable chat mode
135
+ - `.agent` - enable agent mode
136
+ - `.tools` - list available tools
137
+ - `.backup` - backup conversation
138
+ - `.open` - open a file or directory, e.g. `.open /home/user/report.html`
139
+
140
+ ## Keyboard Shortcuts
141
+
142
+ The following key bindings are supported in the BibleMate AI prompt field:
143
+
144
+ - `Ctrl+N` new conversation
145
+ - `Ctrl+Q` quit
146
+ - `Ctrl+C` copy selected prompt text
147
+ - `Ctrl+V` paste text in a prompt
148
+ - `Ctrl+I` or `TAB` new line
149
+ - `Ctrl+Z` clear prompt text
150
+ - `Esc+a` jump to the beginning of a prompt
151
+ - `Esc+z` jump to the end of a prompt
152
+ - `Esc+b` or `HOME` jump to the beginning of a line in a prompt
153
+ - `Esc+e` or `END` jump to the end of a line in a prompt
108
154
 
109
155
  ## License
110
156
 
@@ -1,15 +1,15 @@
1
- biblemate/README.md,sha256=JKyibK8Hdu5yAZrUkIH164W7_0SOKLmChHAYzDQQtFU,4104
1
+ biblemate/README.md,sha256=0xKiXqwB-WBxHCKyUByD-2Yre0zgflTDun-wYc9o-J0,6173
2
2
  biblemate/__init__.py,sha256=aFO4_EperOrwwDBdrkTKfDMt2Fh18Y0A2G_nUC_cmmM,78
3
3
  biblemate/bible_study_mcp.py,sha256=lW4pFlfO-98w1yazOIgr2KyWccPFmrNmIReQUEo3--g,16575
4
4
  biblemate/config.py,sha256=ktpLv_5qdbf3FErxUIdCvVc9MO6kQH4Zt_omoJ7msIs,15
5
- biblemate/main.py,sha256=Y8vBYRWhAQS5kznDaVFBPnCwLdrAG6DhHtPGfG_5TeA,19568
5
+ biblemate/main.py,sha256=l1auGP_zCkmmTIsGqxh-dArj0lWliLYnqxhHafp9X-w,22258
6
6
  biblemate/package_name.txt,sha256=WkkuEEkgw7EKpXV8GshpzhZlwRor1wpotTS7vP24b_g,9
7
- biblemate/requirements.txt,sha256=3U9APDHETMqgh55M1XXgE7rJP-9v4TuW2bSygYFqDfM,70
7
+ biblemate/requirements.txt,sha256=MliJX2PmogiVmgqHk4W0TMqRp2FLYXcGIkf8PS2RV94,70
8
8
  biblemate/core/systems.py,sha256=nG_NgcLSRhdaHuxuCPN5ZfJUhP88kdfwhRCvRk4RLjI,1874
9
9
  biblemate/ui/info.py,sha256=QRCno0CYUHVoOtVkZIxVamZONmtI7KRmOT2YoUagY5s,811
10
10
  biblemate/ui/prompts.py,sha256=mxdC5BU7NMok9MOm1E39MHSrxB9gSRqGY7HsOc--rRg,3484
11
- biblemate-0.0.20.dist-info/METADATA,sha256=76ZmLIW4Q56Ykt0pEPDW1XUknIyOyLF315UFHnIMFHA,5599
12
- biblemate-0.0.20.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
13
- biblemate-0.0.20.dist-info/entry_points.txt,sha256=tbEfTFr6LhPR1E_zP3CsPwJsmG-G4MCnJ3FcQEMiqo0,50
14
- biblemate-0.0.20.dist-info/top_level.txt,sha256=pq9uX0tAS0bizZcZ5GW5zIoDLQBa-b5QDlDGsdHNgiU,10
15
- biblemate-0.0.20.dist-info/RECORD,,
11
+ biblemate-0.0.22.dist-info/METADATA,sha256=_IYAhDnfecbFVrSXVd0-0WEGfs-1cPyRbF_SsKcPuvg,7668
12
+ biblemate-0.0.22.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
13
+ biblemate-0.0.22.dist-info/entry_points.txt,sha256=tbEfTFr6LhPR1E_zP3CsPwJsmG-G4MCnJ3FcQEMiqo0,50
14
+ biblemate-0.0.22.dist-info/top_level.txt,sha256=pq9uX0tAS0bizZcZ5GW5zIoDLQBa-b5QDlDGsdHNgiU,10
15
+ biblemate-0.0.22.dist-info/RECORD,,