chatterer 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chatterer/__init__.py CHANGED
@@ -2,6 +2,7 @@ from .language_model import Chatterer
 from .messages import (
     AIMessage,
     BaseMessage,
+    FunctionMessage,
     HumanMessage,
     SystemMessage,
 )
@@ -16,9 +17,17 @@ from .tools import (
     citation_chunker,
     get_default_html_to_markdown_options,
     html_to_markdown,
+    init_webpage_to_markdown,
     pdf_to_text,
     pyscripts_to_snippets,
 )
+from .utils import (
+    Base64Image,
+    CodeExecutionResult,
+    FunctionSignature,
+    get_default_repl_tool,
+    insert_callables_into_global,
+)
 
 __all__ = [
     "BaseStrategy",
@@ -36,4 +45,11 @@ __all__ = [
     "HumanMessage",
     "SystemMessage",
     "AIMessage",
+    "FunctionMessage",
+    "Base64Image",
+    "init_webpage_to_markdown",
+    "FunctionSignature",
+    "CodeExecutionResult",
+    "get_default_repl_tool",
+    "insert_callables_into_global",
 ]
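
The re-exports above only widen the package's public surface. As a quick sanity check (not part of the diff), the new names should resolve from the package root once chatterer 0.1.9 is installed with its `langchain` extra, since `get_default_repl_tool` pulls in langchain-experimental:

```python
# Illustrative import check for the newly exported names; assumes chatterer 0.1.9
# with the `langchain` extra installed.
from chatterer import (
    Base64Image,
    CodeExecutionResult,
    FunctionMessage,
    FunctionSignature,
    get_default_repl_tool,
    init_webpage_to_markdown,
    insert_callables_into_global,
)

print(get_default_repl_tool())  # a PythonAstREPLTool instance from langchain-experimental
```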
chatterer/language_model.py CHANGED
@@ -2,9 +2,12 @@ from typing import (
     TYPE_CHECKING,
     Any,
     AsyncIterator,
+    Callable,
+    Iterable,
     Iterator,
     Optional,
     Self,
+    Sequence,
     Type,
     TypeAlias,
     TypeVar,
@@ -18,15 +21,39 @@ from langchain_core.runnables.base import Runnable
 from langchain_core.runnables.config import RunnableConfig
 from pydantic import BaseModel, Field
 
-from .messages import AIMessage, BaseMessage, HumanMessage
+from .messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
+from .utils.code_agent import CodeExecutionResult, FunctionSignature
 
 if TYPE_CHECKING:
     from instructor import Partial
+    from langchain_experimental.tools.python.tool import PythonAstREPLTool
 
 PydanticModelT = TypeVar("PydanticModelT", bound=BaseModel)
 StructuredOutputType: TypeAlias = dict[object, object] | BaseModel
 
-DEFAULT_IMAGE_DESCRIPTION_INSTRUCTION = "Just describe all the details you see in the image in few sentences."
+DEFAULT_IMAGE_DESCRIPTION_INSTRUCTION = "Provide a detailed description of all visible elements in the image, summarizing key details in a few clear sentences."
+DEFAULT_CODE_GENERATION_PROMPT = (
+    "You are utilizing a Python code execution tool now.\n"
+    "Your goal is to generate Python code that solves the task efficiently and appends both the code and its output to your context memory.\n"
+    "Since your context window is highly limited, type `pass` if no code execution is needed.\n"
+    "\n"
+    "To optimize tool efficiency, follow these guidelines:\n"
+    "- Write concise, efficient code that directly serves the intended purpose.\n"
+    "- Avoid unnecessary operations (e.g., excessive loops, recursion, or heavy computations).\n"
+    "- Handle potential errors gracefully (e.g., using try-except blocks).\n"
+    "- Prevent excessive output by limiting print statements to essential information only (e.g., avoid printing large datasets).\n"
+    "\n"
+    "Return your response strictly in the following JSON format:\n"
+    '{\n "code": "<your_python_code_here>"\n}\n\n'
+)
+
+
+DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT = (
+    "Below functions are included in global scope and can be used in your code.\n"
+    "Do not try to redefine the function(s).\n"
+    "You don't have to force yourself to use these tools - use them only when you need to.\n"
+)
+DEFAULT_FUNCTION_REFERENCE_SEPARATOR = "\n---\n"  # Separator to distinguish different function references
 
 
 class Chatterer(BaseModel):
@@ -288,6 +315,82 @@ class Chatterer(BaseModel):
         except Exception:
             return None
 
+    def invoke_code_execution(
+        self,
+        messages: LanguageModelInput,
+        repl_tool: Optional["PythonAstREPLTool"] = None,
+        prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
+        additional_callables: Optional[Callable[..., object] | Sequence[Callable[..., object]]] = None,
+        function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+        function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+        config: Optional[RunnableConfig] = None,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> CodeExecutionResult:
+        function_signatures: Optional[list[FunctionSignature]] = None
+        if additional_callables:
+            if not isinstance(additional_callables, Iterable):
+                additional_callables = (additional_callables,)
+            function_signatures = FunctionSignature.from_callables(additional_callables)
+            messages = _add_message_last(
+                messages=messages,
+                prompt_to_add=FunctionSignature.as_prompt(
+                    function_signatures, function_reference_prefix, function_reference_seperator
+                ),
+            )
+        if prompt_for_code_invoke:
+            messages = _add_message_last(messages=messages, prompt_to_add=prompt_for_code_invoke)
+        code_obj: PythonCodeToExecute = self.generate_pydantic(
+            response_model=PythonCodeToExecute, messages=messages, config=config, stop=stop, **kwargs
+        )
+        return CodeExecutionResult.from_code(
+            code=code_obj.code,
+            config=config,
+            repl_tool=repl_tool,
+            function_signatures=function_signatures,
+            **kwargs,
+        )
+
+    async def ainvoke_code_execution(
+        self,
+        messages: LanguageModelInput,
+        repl_tool: Optional["PythonAstREPLTool"] = None,
+        prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
+        additional_callables: Optional[Callable[..., object] | Sequence[Callable[..., object]]] = None,
+        function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+        function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+        config: Optional[RunnableConfig] = None,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> CodeExecutionResult:
+        function_signatures: Optional[list[FunctionSignature]] = None
+        if additional_callables:
+            if not isinstance(additional_callables, Iterable):
+                additional_callables = (additional_callables,)
+            function_signatures = FunctionSignature.from_callables(additional_callables)
+            messages = _add_message_last(
+                messages=messages,
+                prompt_to_add=FunctionSignature.as_prompt(
+                    function_signatures, function_reference_prefix, function_reference_seperator
+                ),
+            )
+        if prompt_for_code_invoke:
+            messages = _add_message_last(messages=messages, prompt_to_add=prompt_for_code_invoke)
+        code_obj: PythonCodeToExecute = await self.agenerate_pydantic(
+            response_model=PythonCodeToExecute, messages=messages, config=config, stop=stop, **kwargs
+        )
+        return await CodeExecutionResult.afrom_code(
+            code=code_obj.code,
+            config=config,
+            repl_tool=repl_tool,
+            function_signatures=function_signatures,
+            **kwargs,
+        )
+
+
+class PythonCodeToExecute(BaseModel):
+    code: str = Field(description="Python code to execute")
+
 
 def with_structured_output(
     client: BaseChatModel,
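
The two methods above form the new code-execution API: they ask the model for a `PythonCodeToExecute` object and immediately run its code through a `PythonAstREPLTool`. A hedged usage sketch follows; the helper function, the prompt text, and the reliance on `Chatterer.openai()` defaults are illustrative assumptions, not taken from the diff:

```python
# Sketch of invoke_code_execution with an extra callable exposed to the generated code.
from chatterer import Chatterer


def fetch_constant() -> int:
    """Return a fixed number; stands in for any helper the generated code may call."""
    return 42


chatterer = Chatterer.openai()  # assumes an OpenAI API key is configured in the environment
result = chatterer.invoke_code_execution(
    messages="Multiply the value returned by fetch_constant() by 2 and print it.",
    additional_callables=fetch_constant,  # advertised to the model and injected into the REPL globals
)
print(result.code)    # the Python snippet the model produced
print(result.output)  # whatever the PythonAstREPLTool captured when running it
```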
@@ -297,75 +400,93 @@ def with_structured_output(
     return client.with_structured_output(schema=response_model, **structured_output_kwargs)  # pyright: ignore[reportUnknownVariableType, reportUnknownMemberType]
 
 
-if __name__ == "__main__":
-    import asyncio
-
-    # Define a Pydantic model for testing
-    class Propositions(BaseModel):
-        proposition_topic: str
-        proposition_content: str
-
-    chatterer = Chatterer.openai()
-    prompt = "What is the meaning of life?"
-
-    # === Synchronous Tests ===
-
-    # generate
-    print("=== Synchronous generate ===")
-    result_sync = chatterer(prompt)
-    print("Result (generate):", result_sync)
-
-    # generate_stream
-    print("\n=== Synchronous generate_stream ===")
-    for i, chunk in enumerate(chatterer.generate_stream(prompt)):
-        print(f"Chunk {i}:", chunk)
-
-    # generate_pydantic
-    print("\n=== Synchronous generate_pydantic ===")
-    result_pydantic = chatterer(prompt, Propositions)
-    print("Result (generate_pydantic):", result_pydantic)
-
-    # generate_pydantic_stream
-    print("\n=== Synchronous generate_pydantic_stream ===")
-    for i, chunk in enumerate(chatterer.generate_pydantic_stream(Propositions, prompt)):
-        print(f"Pydantic Chunk {i}:", chunk)
-
-    # === Asynchronous Tests ===
-
-    # Async helper function to enumerate async iterator
-    async def async_enumerate(aiter: AsyncIterator[Any], start: int = 0) -> AsyncIterator[tuple[int, Any]]:
-        i = start
-        async for item in aiter:
-            yield i, item
-            i += 1
-
-    async def run_async_tests():
-        # 6. agenerate
-        print("\n=== Asynchronous agenerate ===")
-        result_async = await chatterer.agenerate(prompt)
-        print("Result (agenerate):", result_async)
-
-        # 7. agenerate_stream
-        print("\n=== Asynchronous agenerate_stream ===")
-        async for i, chunk in async_enumerate(chatterer.agenerate_stream(prompt)):
-            print(f"Async Chunk {i}:", chunk)
-
-        # 8. agenerate_pydantic
-        print("\n=== Asynchronous agenerate_pydantic ===")
-        try:
-            result_async_pydantic = await chatterer.agenerate_pydantic(Propositions, prompt)
-            print("Result (agenerate_pydantic):", result_async_pydantic)
-        except Exception as e:
-            print("Error in agenerate_pydantic:", e)
+def _add_message_last(messages: LanguageModelInput, prompt_to_add: str) -> LanguageModelInput:
+    if isinstance(messages, str):
+        messages += f"\n{prompt_to_add}"
+    elif isinstance(messages, Sequence):
+        messages = list(messages)
+        messages.append(SystemMessage(content=prompt_to_add))
+    else:
+        messages = messages.to_messages()
+        messages.append(SystemMessage(content=prompt_to_add))
+    return messages
 
-        # 9. agenerate_pydantic_stream
-        print("\n=== Asynchronous agenerate_pydantic_stream ===")
-        try:
-            i = 0
-            async for chunk in chatterer.agenerate_pydantic_stream(Propositions, prompt):
-                print(f"Async Pydantic Chunk {i}:", chunk)
-                i += 1
-        except Exception as e:
-            print("Error in agenerate_pydantic_stream:", e)
-
-    asyncio.run(run_async_tests())
+
+# def _add_message_first(messages: LanguageModelInput, prompt_to_add: str) -> LanguageModelInput:
+#     if isinstance(messages, str):
+#         messages = f"{prompt_to_add}\n{messages}"
+#     elif isinstance(messages, Sequence):
+#         messages = list(messages)
+#         messages.insert(0, SystemMessage(content=prompt_to_add))
+#     else:
+#         messages = messages.to_messages()
+#         messages.insert(0, SystemMessage(content=prompt_to_add))
+#     return messages
+
+
+def chatbot_example(chatterer: Chatterer = Chatterer.openai()) -> None:
+    # Define the CodeExecutionDecision class using Pydantic
+
+    from rich.console import Console
+    from rich.prompt import Prompt
+
+    class CodeExecutionDecision(BaseModel):
+        is_code_execution_needed: bool = Field(
+            description="Whether Python tool calling is needed to answer user query."
+        )
+
+    # Initialize Rich console
+    console = Console()
+
+    # Initialize conversation context
+    context: list[BaseMessage] = [SystemMessage("You are an AI that can answer questions and execute Python code.")]
+
+    # Display welcome message
+    console.print("[bold blue]Welcome to the Rich-based chatbot![/bold blue]")
+    console.print("Type 'quit' or 'exit' to end the conversation.")
+
+    while True:
+        # Get user input
+        user_input = Prompt.ask("[bold green]You[/bold green]")
+        if user_input.lower() in ["quit", "exit"]:
+            console.print("[bold blue]Goodbye![/bold blue]")
+            break
+
+        # Add user message to context
+        context.append(HumanMessage(content=user_input))
+
+        # Determine if code execution is needed
+        decision = chatterer.generate_pydantic(
+            response_model=CodeExecutionDecision,  # Use response_model instead of pydantic_model
+            messages=context,
+        )
+
+        if decision.is_code_execution_needed:
+            # Execute code if needed
+            code_result = chatterer.invoke_code_execution(messages=context)
+            if code_result.code.strip() == "pass":
+                new_message = None
+            else:
+                new_message = SystemMessage(
+                    content=f"Executed code:\n```python\n{code_result.code}\n```\nOutput:\n{code_result.output}"
+                )
+            console.print("[bold yellow]Executed code:[/bold yellow]")
+            console.print(f"[code]{code_result.code}[/code]")
+            console.print("[bold yellow]Output:[/bold yellow]")
+            console.print(code_result.output)
+        else:
+            # No code execution required
+            new_message = None
+
+        # Add system message to context
+        if new_message:
+            context.append(new_message)
+
+        # Generate and display chatbot response
+        response = chatterer.generate(messages=context)  # Use generate instead of generate_response
+        context.append(AIMessage(content=response))
+        console.print(f"[bold blue]Chatbot:[/bold blue] {response}")
+
+
+if __name__ == "__main__":
+    chatbot_example()
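
The removed `__main__` smoke tests are replaced by the interactive `chatbot_example` above. A minimal way to try it, assuming the `rich` dependency and an OpenAI API key are available (neither is stated in the diff):

```python
# Hypothetical manual smoke test of the new entry point.
from chatterer.language_model import Chatterer, chatbot_example

chatbot_example(Chatterer.openai())  # starts the Rich-based loop; type "quit" or "exit" to stop
```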
chatterer/messages.py CHANGED
@@ -1,8 +1,9 @@
-from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
+from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage, HumanMessage, SystemMessage
 
 __all__ = [
     "AIMessage",
     "BaseMessage",
     "HumanMessage",
     "SystemMessage",
+    "FunctionMessage",
 ]
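
`FunctionMessage` is simply re-exported from langchain-core, so no new behaviour is introduced here; a trivial check with arbitrary values:

```python
from chatterer.messages import FunctionMessage

msg = FunctionMessage(name="add", content="5")  # name and content chosen arbitrarily
print(msg.type)  # "function"
```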
chatterer/tools/__init__.py CHANGED
@@ -7,6 +7,13 @@ from .convert_to_text import (
     pyscripts_to_snippets,
 )
 
+
+def init_webpage_to_markdown():
+    from . import webpage_to_markdown
+
+    return webpage_to_markdown
+
+
 __all__ = [
     "html_to_markdown",
     "anything_to_markdown",
@@ -14,4 +21,5 @@ __all__ = [
     "get_default_html_to_markdown_options",
     "pyscripts_to_snippets",
     "citation_chunker",
+    "init_webpage_to_markdown",
 ]
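
`init_webpage_to_markdown` defers importing the `webpage_to_markdown` submodule (and its heavier dependencies such as the Playwright-based bot) until it is actually needed. A minimal sketch of the intended call pattern:

```python
from chatterer.tools import init_webpage_to_markdown

webpage_to_markdown = init_webpage_to_markdown()  # triggers the lazy import
print(webpage_to_markdown.__name__)  # chatterer.tools.webpage_to_markdown
```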
chatterer/utils/__init__.py ADDED
@@ -0,0 +1,15 @@
+from .code_agent import (
+    CodeExecutionResult,
+    FunctionSignature,
+    get_default_repl_tool,
+    insert_callables_into_global,
+)
+from .image import Base64Image
+
+__all__ = [
+    "Base64Image",
+    "FunctionSignature",
+    "CodeExecutionResult",
+    "get_default_repl_tool",
+    "insert_callables_into_global",
+]
chatterer/utils/code_agent.py ADDED
@@ -0,0 +1,134 @@
+import inspect
+import textwrap
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Iterable,
+    NamedTuple,
+    Optional,
+    Self,
+)
+
+from langchain_core.runnables.config import RunnableConfig
+
+if TYPE_CHECKING:
+    from langchain_experimental.tools import PythonAstREPLTool
+
+
+class FunctionSignature(NamedTuple):
+    name: str
+    callable: Callable[..., object]
+    signature: str
+
+    @classmethod
+    def from_callable(cls, callable: Callable[..., object]) -> Self:
+        """
+        Get the name and signature of a function as a string.
+        """
+        # Determine if the function is async
+        is_async_func = inspect.iscoroutinefunction(callable)
+        function_def = "async def" if is_async_func else "def"
+
+        # Determine the function name based on the type of callable
+        if inspect.isfunction(callable):
+            # For regular Python functions, use __code__.co_name
+            function_name = callable.__code__.co_name
+        elif hasattr(callable, "name"):
+            # For StructuredTool or similar objects with a 'name' attribute
+            function_name = callable.name  # type: ignore
+        elif hasattr(callable, "__name__"):
+            # For other callables with a __name__ attribute
+            function_name = callable.__name__
+        else:
+            # Fallback to the class name if no name is found
+            function_name = type(callable).__name__
+
+        # Build the signature string
+        signature = f"{function_def} {function_name}{inspect.signature(callable)}:"
+        docstring = inspect.getdoc(callable)
+        if docstring:
+            docstring = f'"""{docstring.strip()}"""'
+            return cls(
+                name=function_name, callable=callable, signature=f"{signature}\n{textwrap.indent(docstring, '    ')}"
+            )
+        else:
+            return cls(name=function_name, callable=callable, signature=signature)
+
+    @classmethod
+    def from_callables(cls, callables: Iterable[Callable[..., object]]) -> list[Self]:
+        return [cls.from_callable(callable) for callable in callables]
+
+    @classmethod
+    def as_prompt(
+        cls,
+        callables: Iterable[Self],
+        prefix: Optional[str] = "You can use the pre-made functions below without defining them:\n",
+        sep: str = "\n---\n",
+    ) -> str:
+        """
+        Generate a prompt string from a list of callables.
+        """
+        body: str = sep.join(fsig.signature for fsig in callables)
+        if prefix:
+            return f"{prefix}{body}"
+        return body
+
+
+class CodeExecutionResult(NamedTuple):
+    code: str
+    output: str
+
+    @classmethod
+    def from_code(
+        cls,
+        code: str,
+        repl_tool: Optional["PythonAstREPLTool"] = None,
+        config: Optional[RunnableConfig] = None,
+        function_signatures: Optional[Iterable[FunctionSignature]] = None,
+        **kwargs: object,
+    ) -> Self:
+        """
+        Execute code using the Python Code Execution Language Model.
+        """
+        if repl_tool is None:
+            repl_tool = get_default_repl_tool()
+        if function_signatures is not None:
+            insert_callables_into_global(function_signatures=function_signatures, repl_tool=repl_tool)
+        output = str(repl_tool.invoke(code, config=config, **kwargs))  # pyright: ignore[reportUnknownMemberType]
+        return cls(code=code, output=output)
+
+    @classmethod
+    async def afrom_code(
+        cls,
+        code: str,
+        repl_tool: Optional["PythonAstREPLTool"] = None,
+        config: Optional[RunnableConfig] = None,
+        function_signatures: Optional[Iterable[FunctionSignature]] = None,
+        **kwargs: object,
+    ) -> Self:
+        """
+        Execute code using the Python Code Execution Language Model asynchronously.
+        """
+        if repl_tool is None:
+            repl_tool = get_default_repl_tool()
+        if function_signatures is not None:
+            insert_callables_into_global(function_signatures=function_signatures, repl_tool=repl_tool)
+        output = str(await repl_tool.ainvoke(code, config=config, **kwargs))  # pyright: ignore[reportUnknownMemberType]
+        return cls(code=code, output=output)
+
+
+def get_default_repl_tool() -> "PythonAstREPLTool":
+    from langchain_experimental.tools import PythonAstREPLTool
+
+    return PythonAstREPLTool()
+
+
+def insert_callables_into_global(
+    function_signatures: Iterable[FunctionSignature], repl_tool: "PythonAstREPLTool"
+) -> None:
+    """Insert callables into the REPL tool's globals."""
+    repl_globals: Optional[dict[str, object]] = repl_tool.globals  # pyright: ignore[reportUnknownMemberType]
+    if repl_globals is None:
+        repl_tool.globals = {fsig.name: fsig.callable for fsig in function_signatures}
+    else:
+        repl_globals.update({fsig.name: fsig.callable for fsig in function_signatures})
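
The new module can also be exercised without a language model in the loop, which makes it easy to review in isolation. A hedged sketch, where the `add` helper is illustrative and langchain-experimental is assumed to be installed:

```python
from chatterer.utils import CodeExecutionResult, FunctionSignature, get_default_repl_tool


def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


signatures = FunctionSignature.from_callables([add])
print(FunctionSignature.as_prompt(signatures))  # prompt text with the `def add(a: int, b: int) -> int:` signature

result = CodeExecutionResult.from_code(
    code="print(add(2, 3))",
    repl_tool=get_default_repl_tool(),
    function_signatures=signatures,  # injects `add` into the REPL tool's globals
)
print(result.output)  # "5" (stdout captured by PythonAstREPLTool)
```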
chatterer-0.1.8.dist-info/METADATA → chatterer-0.1.9.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chatterer
-Version: 0.1.8
+Version: 0.1.9
 Summary: The highest-level interface for various LLM APIs.
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
@@ -18,13 +18,16 @@ Requires-Dist: pillow>=11.1.0; extra == "conversion"
 Requires-Dist: mistune>=3.1.2; extra == "conversion"
 Requires-Dist: markitdown>=0.0.2; extra == "conversion"
 Requires-Dist: pymupdf>=1.25.4; extra == "conversion"
+Provides-Extra: langchain
+Requires-Dist: chatterer[langchain-providers]; extra == "langchain"
+Requires-Dist: langchain-experimental>=0.3.4; extra == "langchain"
 Provides-Extra: langchain-providers
 Requires-Dist: langchain-openai>=0.3.7; extra == "langchain-providers"
 Requires-Dist: langchain-anthropic>=0.3.8; extra == "langchain-providers"
 Requires-Dist: langchain-google-genai>=2.0.10; extra == "langchain-providers"
 Requires-Dist: langchain-ollama>=0.2.3; extra == "langchain-providers"
 Provides-Extra: all
-Requires-Dist: chatterer[langchain-providers]; extra == "all"
+Requires-Dist: chatterer[langchain]; extra == "all"
 Requires-Dist: chatterer[conversion]; extra == "all"
 Requires-Dist: chatterer[dev]; extra == "all"
 
chatterer-0.1.8.dist-info/RECORD → chatterer-0.1.9.dist-info/RECORD RENAMED
@@ -1,11 +1,11 @@
-chatterer/__init__.py,sha256=kl8VWiDJIt5IQjaBpQu13n0GrzP3qzaNXyA68B1xHTE,802
-chatterer/language_model.py,sha256=S8x2IbzZBi1mAKSKrGuoB4-gfKBz73RCNXt_H-fiDzc,13826
-chatterer/messages.py,sha256=-NyOIK7wJI1uVD8qaJPeLA0LqirFEsZ1mOYoO1F2wLc,188
+chatterer/__init__.py,sha256=QqW6ITeJ7Qpt42BvVzTgI8M6vKRYIlwSDa0nMO4NsV0,1209
+chatterer/language_model.py,sha256=J7_iLtfjr-0tNTejrY4_vLiEWGTnRGGUb_x9G2CZ-Vg,20083
+chatterer/messages.py,sha256=OtbZ3two0LUQ4PXES97FDIBUSO3IcMHdFV1VFkDL2mI,229
 chatterer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 chatterer/strategies/__init__.py,sha256=SdOggbmHpw4f7Njwy-T8q64e91OLOUp1k0a0ozZd4qI,221
 chatterer/strategies/atom_of_thoughts.py,sha256=CygOCLu5vLk-fzY9O-iE3qLShfjD7iY40ks9jH4ULBM,40872
 chatterer/strategies/base.py,sha256=b2gMPqodp97OP1dkHfj0UqixjdjVhmTw_V5qJ7i2S6g,427
-chatterer/tools/__init__.py,sha256=yA4RcHIAO33xsmWXQTmtSm9bk1p80yJKSadtMa3X-aY,415
+chatterer/tools/__init__.py,sha256=XomZMXHKhMyLprQkCfAKetI_uueHH184xWESjTKJeeA,560
 chatterer/tools/convert_to_text.py,sha256=kBqxCJ0IoiAw2eiPYqep_SPZm-TtYKF7mdACLsWQUuI,15915
 chatterer/tools/citation_chunking/__init__.py,sha256=gG7Fnkkp28UpcWMbfMY_4gqzZSZ8QzlhalHBoeoq7K0,82
 chatterer/tools/citation_chunking/chunks.py,sha256=50Dpa43RaYftlNox8tM1qI8htZ3_AJ9Uyyn02WsmxYk,2173
@@ -17,8 +17,10 @@ chatterer/tools/citation_chunking/utils.py,sha256=M4pH2-UIE1VLzQLXDqjEe4L3Xcy0e0
 chatterer/tools/webpage_to_markdown/__init__.py,sha256=bHH4qfnXyw8Zz-yBPLaTezF1sh9njvNBJmhBVtcpjsA,123
 chatterer/tools/webpage_to_markdown/playwright_bot.py,sha256=yP0KixYZNQ4Kn_ZCFDI3mVyBD_DpUGfqgklpaGJUTCU,27496
 chatterer/tools/webpage_to_markdown/utils.py,sha256=ZLUU94imYciEdynD2K7Dmcsbt8BVQTaOP56Ba6DAFvk,12593
+chatterer/utils/__init__.py,sha256=8nzpFJKU_wSRPH6LBP6HRBotPMrSl_VO9UlmFprTrK0,334
+chatterer/utils/code_agent.py,sha256=phMMXHeZNkzHrySX72y50IW3-o2MOSriPV9IUPQd4nU,4973
 chatterer/utils/image.py,sha256=F3_D1677UDFlgp-UQBS_ChkNODzf_VOfjYNSUi02MaI,10852
-chatterer-0.1.8.dist-info/METADATA,sha256=01CGNp0oae5VdHM5gzqPKYFtlSqufE0h5XFMdn2E_6c,4234
-chatterer-0.1.8.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
-chatterer-0.1.8.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
-chatterer-0.1.8.dist-info/RECORD,,
+chatterer-0.1.9.dist-info/METADATA,sha256=GbjuJgbQJ09TEYt7lvI96Od5saTZ_y0KDzmybsW-0H0,4388
+chatterer-0.1.9.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
+chatterer-0.1.9.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
+chatterer-0.1.9.dist-info/RECORD,,