webscout 7.1__py3-none-any.whl → 7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's security advisory for details.

Files changed (144)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +3 -3
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +3 -4
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +3 -3
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +3 -3
  45. webscout/Provider/Groq.py +5 -1
  46. webscout/Provider/Jadve.py +3 -3
  47. webscout/Provider/Marcus.py +191 -192
  48. webscout/Provider/Netwrck.py +3 -3
  49. webscout/Provider/PI.py +2 -2
  50. webscout/Provider/PizzaGPT.py +2 -3
  51. webscout/Provider/QwenLM.py +311 -0
  52. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  53. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  54. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  55. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  56. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  57. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  58. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  59. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  60. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  61. webscout/Provider/TTI/artbit/__init__.py +22 -22
  62. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  63. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  64. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  65. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  66. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  67. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  68. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  69. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  70. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  71. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  72. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  73. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  74. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  75. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  76. webscout/Provider/TTI/talkai/__init__.py +4 -4
  77. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  78. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  79. webscout/Provider/TTS/deepgram.py +182 -182
  80. webscout/Provider/TTS/elevenlabs.py +136 -136
  81. webscout/Provider/TTS/gesserit.py +150 -150
  82. webscout/Provider/TTS/murfai.py +138 -138
  83. webscout/Provider/TTS/parler.py +133 -134
  84. webscout/Provider/TTS/streamElements.py +360 -360
  85. webscout/Provider/TTS/utils.py +280 -280
  86. webscout/Provider/TTS/voicepod.py +116 -116
  87. webscout/Provider/TextPollinationsAI.py +2 -3
  88. webscout/Provider/WiseCat.py +193 -0
  89. webscout/Provider/__init__.py +144 -134
  90. webscout/Provider/cerebras.py +242 -227
  91. webscout/Provider/chatglm.py +204 -204
  92. webscout/Provider/dgaf.py +2 -3
  93. webscout/Provider/gaurish.py +2 -3
  94. webscout/Provider/geminiapi.py +208 -208
  95. webscout/Provider/granite.py +223 -0
  96. webscout/Provider/hermes.py +218 -218
  97. webscout/Provider/llama3mitril.py +179 -179
  98. webscout/Provider/llamatutor.py +3 -3
  99. webscout/Provider/llmchat.py +2 -3
  100. webscout/Provider/meta.py +794 -794
  101. webscout/Provider/multichat.py +331 -331
  102. webscout/Provider/typegpt.py +359 -359
  103. webscout/Provider/yep.py +2 -2
  104. webscout/__main__.py +5 -5
  105. webscout/cli.py +319 -319
  106. webscout/conversation.py +241 -242
  107. webscout/exceptions.py +328 -328
  108. webscout/litagent/__init__.py +28 -28
  109. webscout/litagent/agent.py +2 -3
  110. webscout/litprinter/__init__.py +0 -58
  111. webscout/scout/__init__.py +8 -8
  112. webscout/scout/core.py +884 -884
  113. webscout/scout/element.py +459 -459
  114. webscout/scout/parsers/__init__.py +69 -69
  115. webscout/scout/parsers/html5lib_parser.py +172 -172
  116. webscout/scout/parsers/html_parser.py +236 -236
  117. webscout/scout/parsers/lxml_parser.py +178 -178
  118. webscout/scout/utils.py +38 -38
  119. webscout/swiftcli/__init__.py +811 -811
  120. webscout/update_checker.py +2 -12
  121. webscout/version.py +1 -1
  122. webscout/webscout_search.py +5 -4
  123. webscout/zeroart/__init__.py +54 -54
  124. webscout/zeroart/base.py +60 -60
  125. webscout/zeroart/effects.py +99 -99
  126. webscout/zeroart/fonts.py +816 -816
  127. {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
  128. webscout-7.2.dist-info/RECORD +217 -0
  129. webstoken/__init__.py +30 -30
  130. webstoken/classifier.py +189 -189
  131. webstoken/keywords.py +216 -216
  132. webstoken/language.py +128 -128
  133. webstoken/ner.py +164 -164
  134. webstoken/normalizer.py +35 -35
  135. webstoken/processor.py +77 -77
  136. webstoken/sentiment.py +206 -206
  137. webstoken/stemmer.py +73 -73
  138. webstoken/tagger.py +60 -60
  139. webstoken/tokenizer.py +158 -158
  140. webscout-7.1.dist-info/RECORD +0 -198
  141. {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  142. {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  143. {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  144. {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/conversation.py CHANGED
@@ -1,242 +1,241 @@
1
- import os
2
- import logging
3
- from typing import Optional
4
- from .Litlogger import LitLogger, LogFormat, ColorScheme
5
-
6
- # Create a logger instance for this module
7
- logger = LitLogger(
8
- name="Conversation",
9
- format=LogFormat.MODERN_EMOJI,
10
- color_scheme=ColorScheme.CYBERPUNK
11
- )
12
-
13
- class Conversation:
14
- """Handles prompt generation based on history and maintains chat context.
15
-
16
- This class is responsible for managing chat conversations, including:
17
- - Maintaining chat history
18
- - Loading/saving conversations from/to files
19
- - Generating prompts based on context
20
- - Managing token limits and history pruning
21
-
22
- Examples:
23
- >>> chat = Conversation(max_tokens=500)
24
- >>> chat.add_message("user", "Hello!")
25
- >>> chat.add_message("llm", "Hi there!")
26
- >>> prompt = chat.gen_complete_prompt("What's up?")
27
- """
28
-
29
- intro = (
30
- "You're a Large Language Model for chatting with people. "
31
- "Assume role of the LLM and give your response."
32
- )
33
-
34
- def __init__(
35
- self,
36
- status: bool = True,
37
- max_tokens: int = 600,
38
- filepath: Optional[str] = None,
39
- update_file: bool = True,
40
- ):
41
- """Initialize a new Conversation manager.
42
-
43
- Args:
44
- status (bool): Flag to control history tracking. Defaults to True.
45
- max_tokens (int): Maximum tokens for completion response. Defaults to 600.
46
- filepath (str, optional): Path to save/load conversation history. Defaults to None.
47
- update_file (bool): Whether to append new messages to file. Defaults to True.
48
-
49
- Examples:
50
- >>> chat = Conversation(max_tokens=500)
51
- >>> chat = Conversation(filepath="chat_history.txt")
52
- """
53
- self.status = status
54
- self.max_tokens_to_sample = max_tokens
55
- self.chat_history = "" # Initialize as empty string
56
- self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
57
- self.file = filepath
58
- self.update_file = update_file
59
- self.history_offset = 10250
60
- self.prompt_allowance = 10
61
-
62
- if filepath:
63
- self.load_conversation(filepath, False)
64
-
65
- def load_conversation(self, filepath: str, exists: bool = True) -> None:
66
- """Load conversation history from a text file.
67
-
68
- Args:
69
- filepath (str): Path to the history file
70
- exists (bool): Flag for file availability. Defaults to True.
71
-
72
- Raises:
73
- AssertionError: If filepath is not str or file doesn't exist
74
- """
75
- assert isinstance(
76
- filepath, str
77
- ), f"Filepath needs to be of str datatype not {type(filepath)}"
78
- assert (
79
- os.path.isfile(filepath) if exists else True
80
- ), f"File '{filepath}' does not exist"
81
-
82
- if not os.path.isfile(filepath):
83
- logging.debug(f"Creating new chat-history file - '{filepath}'")
84
- with open(filepath, "w", encoding="utf-8") as fh:
85
- fh.write(self.intro)
86
- else:
87
- logging.debug(f"Loading conversation from '{filepath}'")
88
- with open(filepath, encoding="utf-8") as fh:
89
- file_contents = fh.readlines()
90
- if file_contents:
91
- self.intro = file_contents[0] # First line is intro
92
- self.chat_history = "\n".join(file_contents[1:])
93
-
94
- def __trim_chat_history(self, chat_history: str, intro: str) -> str:
95
- """Keep the chat history fresh by trimming it when it gets too long!
96
-
97
- This method makes sure we don't exceed our token limits by:
98
- - Calculating total length (intro + history)
99
- - Trimming older messages if needed
100
- - Keeping the convo smooth and within limits
101
-
102
- Args:
103
- chat_history (str): The current chat history to trim
104
- intro (str): The conversation's intro/system prompt
105
-
106
- Returns:
107
- str: The trimmed chat history, ready to use!
108
-
109
- Examples:
110
- >>> chat = Conversation(max_tokens=500)
111
- >>> trimmed = chat._Conversation__trim_chat_history("Hello! Hi!", "Intro")
112
- """
113
- len_of_intro = len(intro)
114
- len_of_chat_history = len(chat_history)
115
- total = self.max_tokens_to_sample + len_of_intro + len_of_chat_history
116
-
117
- if total > self.history_offset:
118
- truncate_at = (total - self.history_offset) + self.prompt_allowance
119
- trimmed_chat_history = chat_history[truncate_at:]
120
- return "... " + trimmed_chat_history
121
- return chat_history
122
-
123
- def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
124
- """Generate a complete prompt that's ready to go!
125
-
126
- This method:
127
- - Combines the intro, history, and new prompt
128
- - Trims history if needed
129
- - Keeps everything organized and flowing
130
-
131
- Args:
132
- prompt (str): Your message to add to the chat
133
- intro (str, optional): Custom intro to use. Default: None (uses class intro)
134
-
135
- Returns:
136
- str: The complete conversation prompt, ready for the LLM!
137
-
138
- Examples:
139
- >>> chat = Conversation()
140
- >>> prompt = chat.gen_complete_prompt("What's good?")
141
- """
142
- if not self.status:
143
- return prompt
144
-
145
- intro = intro or self.intro or (
146
- "You're a Large Language Model for chatting with people. "
147
- "Assume role of the LLM and give your response."
148
- )
149
-
150
- incomplete_chat_history = self.chat_history + self.history_format % {
151
- "user": prompt,
152
- "llm": ""
153
- }
154
- complete_prompt = intro + self.__trim_chat_history(incomplete_chat_history, intro)
155
- # logger.info(f"Generated prompt: {complete_prompt}")
156
- return complete_prompt
157
-
158
- def update_chat_history(
159
- self, prompt: str, response: str, force: bool = False
160
- ) -> None:
161
- """Keep the conversation flowing by updating the chat history!
162
-
163
- This method:
164
- - Adds new messages to the history
165
- - Updates the file if needed
166
- - Keeps everything organized
167
-
168
- Args:
169
- prompt (str): Your message to add
170
- response (str): The LLM's response
171
- force (bool): Force update even if history is off. Default: False
172
-
173
- Examples:
174
- >>> chat = Conversation()
175
- >>> chat.update_chat_history("Hi!", "Hello there!")
176
- """
177
- if not self.status and not force:
178
- return
179
-
180
- new_history = self.history_format % {"user": prompt, "llm": response}
181
-
182
- if self.file and self.update_file:
183
- # Create file if it doesn't exist
184
- if not os.path.exists(self.file):
185
- with open(self.file, "w", encoding="utf-8") as fh:
186
- fh.write(self.intro + "\n")
187
-
188
- # Append new history
189
- with open(self.file, "a", encoding="utf-8") as fh:
190
- fh.write(new_history)
191
-
192
- self.chat_history += new_history
193
- # logger.info(f"Chat history updated with prompt: {prompt}")
194
-
195
- def add_message(self, role: str, content: str) -> None:
196
- """Add a new message to the chat - simple and clean!
197
-
198
- This method:
199
- - Validates the message role
200
- - Adds the message to history
201
- - Updates file if needed
202
-
203
- Args:
204
- role (str): Who's sending? ('user', 'llm', 'tool', or 'reasoning')
205
- content (str): What's the message?
206
-
207
- Examples:
208
- >>> chat = Conversation()
209
- >>> chat.add_message("user", "Hey there!")
210
- >>> chat.add_message("llm", "Hi! How can I help?")
211
- """
212
- if not self.validate_message(role, content):
213
- raise ValueError("Invalid message role or content")
214
-
215
- role_formats = {
216
- "user": "User",
217
- "llm": "LLM",
218
- "tool": "Tool",
219
- "reasoning": "Reasoning"
220
- }
221
-
222
- if role in role_formats:
223
- self.chat_history += f"\n{role_formats[role]} : {content}"
224
- else:
225
- logger.warning(f"Unknown role '{role}' for message: {content}")
226
-
227
- # # Enhanced logging for message addition
228
- # logger.info(f"Added message from {role}: {content}")
229
- # logging.info(f"Message added: {role}: {content}")
230
-
231
- # def validate_message(self, role: str, content: str) -> bool:
232
- # """Validate the message role and content."""
233
- # valid_roles = {'user', 'llm', 'tool', 'reasoning'}
234
- # if role not in valid_roles:
235
- # logger.error(f"Invalid role: {role}")
236
- # return False
237
- # if not content:
238
- # logger.error("Content cannot be empty.")
239
- # return False
240
- # return True
241
-
242
-
1
import os
import logging
from typing import Optional

from .Litlogger import Logger, LogFormat

# Module-level logger used by Conversation for non-fatal diagnostics
# (e.g. unknown message roles). Uses the package's Litlogger with the
# emoji-flavored modern format.
logger = Logger(
    name="Conversation",
    format=LogFormat.MODERN_EMOJI,
)
12
class Conversation:
    """Handles prompt generation based on history and maintains chat context.

    This class is responsible for managing chat conversations, including:
    - Maintaining chat history
    - Loading/saving conversations from/to files
    - Generating prompts based on context
    - Managing token limits and history pruning

    Examples:
        >>> chat = Conversation(max_tokens=500)
        >>> chat.add_message("user", "Hello!")
        >>> chat.add_message("llm", "Hi there!")
        >>> prompt = chat.gen_complete_prompt("What's up?")
    """

    # Default system prompt; may be overwritten by the first line of a
    # loaded history file (see load_conversation).
    intro = (
        "You're a Large Language Model for chatting with people. "
        "Assume role of the LLM and give your response."
    )

    def __init__(
        self,
        status: bool = True,
        max_tokens: int = 600,
        filepath: Optional[str] = None,
        update_file: bool = True,
    ):
        """Initialize a new Conversation manager.

        Args:
            status (bool): Flag to control history tracking. Defaults to True.
            max_tokens (int): Maximum tokens for completion response. Defaults to 600.
            filepath (str, optional): Path to save/load conversation history. Defaults to None.
            update_file (bool): Whether to append new messages to file. Defaults to True.

        Examples:
            >>> chat = Conversation(max_tokens=500)
            >>> chat = Conversation(filepath="chat_history.txt")
        """
        self.status = status
        self.max_tokens_to_sample = max_tokens
        self.chat_history = ""  # Accumulated transcript as one string
        # NOTE: no space after "LLM :" is intentional-looking but odd;
        # preserved because saved history files and prompt parsing rely on
        # this exact byte sequence.
        self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
        self.file = filepath
        self.update_file = update_file
        # Character budget (intro + history + completion headroom) before
        # older history gets trimmed; these are characters, not tokens.
        self.history_offset = 10250
        self.prompt_allowance = 10

        if filepath:
            # exists=False: create the file if it is not there yet.
            self.load_conversation(filepath, False)

    def load_conversation(self, filepath: str, exists: bool = True) -> None:
        """Load conversation history from a text file.

        Args:
            filepath (str): Path to the history file
            exists (bool): Flag for file availability. Defaults to True.

        Raises:
            AssertionError: If filepath is not str or file doesn't exist
        """
        # NOTE(review): assert-based validation is stripped under ``python -O``;
        # kept as-is because the documented contract raises AssertionError.
        assert isinstance(
            filepath, str
        ), f"Filepath needs to be of str datatype not {type(filepath)}"
        assert (
            os.path.isfile(filepath) if exists else True
        ), f"File '{filepath}' does not exist"

        if not os.path.isfile(filepath):
            logging.debug(f"Creating new chat-history file - '{filepath}'")
            with open(filepath, "w", encoding="utf-8") as fh:
                fh.write(self.intro)
        else:
            logging.debug(f"Loading conversation from '{filepath}'")
            with open(filepath, encoding="utf-8") as fh:
                file_contents = fh.readlines()
                if file_contents:
                    # First line of the file is treated as the system intro
                    # (note: it keeps its trailing newline, matching the
                    # original behavior).
                    self.intro = file_contents[0]
                    self.chat_history = "\n".join(file_contents[1:])

    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
        """Trim the oldest part of the history when the total budget is exceeded.

        The budget is ``max_tokens_to_sample + len(intro) + len(history)``
        compared against ``history_offset`` (character counts, not tokens).

        Args:
            chat_history (str): The current chat history to trim
            intro (str): The conversation's intro/system prompt

        Returns:
            str: The (possibly trimmed) chat history, prefixed with "... "
            when trimming occurred.
        """
        len_of_intro = len(intro)
        len_of_chat_history = len(chat_history)
        total = self.max_tokens_to_sample + len_of_intro + len_of_chat_history

        if total > self.history_offset:
            # Drop just enough of the oldest text to get back under budget,
            # plus a small allowance for the new prompt.
            truncate_at = (total - self.history_offset) + self.prompt_allowance
            trimmed_chat_history = chat_history[truncate_at:]
            return "... " + trimmed_chat_history
        return chat_history

    def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
        """Build the full prompt: intro + (trimmed) history + new user message.

        Args:
            prompt (str): Your message to add to the chat
            intro (str, optional): Custom intro to use. Default: None (uses class intro)

        Returns:
            str: The complete conversation prompt, ready for the LLM.
            When history tracking is disabled (``status`` is False), the
            prompt is returned unchanged.

        Examples:
            >>> chat = Conversation()
            >>> prompt = chat.gen_complete_prompt("What's good?")
        """
        if not self.status:
            return prompt

        # Fall back to the class default if both the argument and the
        # instance intro are empty/None.
        intro = intro or self.intro or (
            "You're a Large Language Model for chatting with people. "
            "Assume role of the LLM and give your response."
        )

        # Append the new user turn with an empty LLM slot for completion.
        incomplete_chat_history = self.chat_history + self.history_format % {
            "user": prompt,
            "llm": ""
        }
        complete_prompt = intro + self.__trim_chat_history(incomplete_chat_history, intro)
        return complete_prompt

    def update_chat_history(
        self, prompt: str, response: str, force: bool = False
    ) -> None:
        """Append a completed user/LLM exchange to the history (and file).

        Args:
            prompt (str): Your message to add
            response (str): The LLM's response
            force (bool): Force update even if history is off. Default: False

        Examples:
            >>> chat = Conversation()
            >>> chat.update_chat_history("Hi!", "Hello there!")
        """
        if not self.status and not force:
            return

        new_history = self.history_format % {"user": prompt, "llm": response}

        if self.file and self.update_file:
            # Create file (with intro header) if it doesn't exist yet.
            if not os.path.exists(self.file):
                with open(self.file, "w", encoding="utf-8") as fh:
                    fh.write(self.intro + "\n")

            # Append the new exchange.
            with open(self.file, "a", encoding="utf-8") as fh:
                fh.write(new_history)

        self.chat_history += new_history

    def add_message(self, role: str, content: str) -> None:
        """Add a single message to the chat history.

        Args:
            role (str): Who's sending? ('user', 'llm', 'tool', or 'reasoning')
            content (str): The message text (must be non-empty)

        Raises:
            ValueError: If the role is unknown or the content is empty.

        Examples:
            >>> chat = Conversation()
            >>> chat.add_message("user", "Hey there!")
            >>> chat.add_message("llm", "Hi! How can I help?")
        """
        if not self.validate_message(role, content):
            raise ValueError("Invalid message role or content")

        role_formats = {
            "user": "User",
            "llm": "LLM",
            "tool": "Tool",
            "reasoning": "Reasoning"
        }

        if role in role_formats:
            self.chat_history += f"\n{role_formats[role]} : {content}"
        else:
            # Defensive: unreachable while validate_message gates the same
            # role set, but kept so a future role addition can't silently drop
            # messages.
            logger.warning(f"Unknown role '{role}' for message: {content}")

    def validate_message(self, role: str, content: str) -> bool:
        """Return True if *role* is a known sender and *content* is non-empty.

        BUG FIX: this method was commented out in the original file while
        still being called from add_message, so every add_message call raised
        AttributeError. Restored per the commented-out implementation's
        intent (without its logging side effects; the failure is surfaced to
        callers as ValueError by add_message).
        """
        valid_roles = {"user", "llm", "tool", "reasoning"}
        return role in valid_roles and bool(content)