webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (150) hide show
  1. webscout/AIauto.py +1 -1
  2. webscout/AIutel.py +298 -239
  3. webscout/Extra/Act.md +309 -0
  4. webscout/Extra/GitToolkit/gitapi/README.md +110 -0
  5. webscout/Extra/YTToolkit/README.md +375 -0
  6. webscout/Extra/YTToolkit/ytapi/README.md +44 -0
  7. webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
  8. webscout/Extra/autocoder/autocoder.py +309 -114
  9. webscout/Extra/autocoder/autocoder_utiles.py +15 -15
  10. webscout/Extra/gguf.md +430 -0
  11. webscout/Extra/tempmail/README.md +488 -0
  12. webscout/Extra/weather.md +281 -0
  13. webscout/Litlogger/Readme.md +175 -0
  14. webscout/Provider/AISEARCH/DeepFind.py +41 -37
  15. webscout/Provider/AISEARCH/README.md +279 -0
  16. webscout/Provider/AISEARCH/__init__.py +0 -1
  17. webscout/Provider/AISEARCH/genspark_search.py +228 -86
  18. webscout/Provider/AISEARCH/hika_search.py +11 -11
  19. webscout/Provider/AISEARCH/scira_search.py +324 -322
  20. webscout/Provider/AllenAI.py +7 -14
  21. webscout/Provider/Blackboxai.py +518 -74
  22. webscout/Provider/Cloudflare.py +0 -1
  23. webscout/Provider/Deepinfra.py +23 -21
  24. webscout/Provider/Flowith.py +217 -0
  25. webscout/Provider/FreeGemini.py +250 -0
  26. webscout/Provider/GizAI.py +15 -5
  27. webscout/Provider/Glider.py +11 -8
  28. webscout/Provider/HeckAI.py +80 -52
  29. webscout/Provider/Koboldai.py +7 -4
  30. webscout/Provider/LambdaChat.py +2 -2
  31. webscout/Provider/Marcus.py +10 -18
  32. webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
  33. webscout/Provider/OPENAI/Cloudflare.py +378 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +282 -0
  35. webscout/Provider/OPENAI/NEMOTRON.py +244 -0
  36. webscout/Provider/OPENAI/README.md +1253 -0
  37. webscout/Provider/OPENAI/__init__.py +8 -0
  38. webscout/Provider/OPENAI/ai4chat.py +293 -286
  39. webscout/Provider/OPENAI/api.py +810 -0
  40. webscout/Provider/OPENAI/base.py +217 -14
  41. webscout/Provider/OPENAI/c4ai.py +373 -367
  42. webscout/Provider/OPENAI/chatgpt.py +7 -0
  43. webscout/Provider/OPENAI/chatgptclone.py +7 -0
  44. webscout/Provider/OPENAI/chatsandbox.py +172 -0
  45. webscout/Provider/OPENAI/deepinfra.py +30 -20
  46. webscout/Provider/OPENAI/e2b.py +6 -0
  47. webscout/Provider/OPENAI/exaai.py +7 -0
  48. webscout/Provider/OPENAI/exachat.py +6 -0
  49. webscout/Provider/OPENAI/flowith.py +162 -0
  50. webscout/Provider/OPENAI/freeaichat.py +359 -352
  51. webscout/Provider/OPENAI/glider.py +323 -316
  52. webscout/Provider/OPENAI/groq.py +361 -354
  53. webscout/Provider/OPENAI/heckai.py +30 -64
  54. webscout/Provider/OPENAI/llmchatco.py +8 -0
  55. webscout/Provider/OPENAI/mcpcore.py +7 -0
  56. webscout/Provider/OPENAI/multichat.py +8 -0
  57. webscout/Provider/OPENAI/netwrck.py +356 -350
  58. webscout/Provider/OPENAI/opkfc.py +8 -0
  59. webscout/Provider/OPENAI/scirachat.py +471 -462
  60. webscout/Provider/OPENAI/sonus.py +9 -0
  61. webscout/Provider/OPENAI/standardinput.py +9 -1
  62. webscout/Provider/OPENAI/textpollinations.py +339 -329
  63. webscout/Provider/OPENAI/toolbaz.py +7 -0
  64. webscout/Provider/OPENAI/typefully.py +355 -0
  65. webscout/Provider/OPENAI/typegpt.py +358 -346
  66. webscout/Provider/OPENAI/uncovrAI.py +7 -0
  67. webscout/Provider/OPENAI/utils.py +103 -7
  68. webscout/Provider/OPENAI/venice.py +12 -0
  69. webscout/Provider/OPENAI/wisecat.py +19 -19
  70. webscout/Provider/OPENAI/writecream.py +7 -0
  71. webscout/Provider/OPENAI/x0gpt.py +7 -0
  72. webscout/Provider/OPENAI/yep.py +50 -21
  73. webscout/Provider/OpenGPT.py +1 -1
  74. webscout/Provider/TTI/AiForce/README.md +159 -0
  75. webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
  76. webscout/Provider/TTI/ImgSys/README.md +174 -0
  77. webscout/Provider/TTI/MagicStudio/README.md +101 -0
  78. webscout/Provider/TTI/Nexra/README.md +155 -0
  79. webscout/Provider/TTI/PollinationsAI/README.md +146 -0
  80. webscout/Provider/TTI/README.md +128 -0
  81. webscout/Provider/TTI/aiarta/README.md +134 -0
  82. webscout/Provider/TTI/artbit/README.md +100 -0
  83. webscout/Provider/TTI/fastflux/README.md +129 -0
  84. webscout/Provider/TTI/huggingface/README.md +114 -0
  85. webscout/Provider/TTI/piclumen/README.md +161 -0
  86. webscout/Provider/TTI/pixelmuse/README.md +79 -0
  87. webscout/Provider/TTI/talkai/README.md +139 -0
  88. webscout/Provider/TTS/README.md +192 -0
  89. webscout/Provider/TTS/__init__.py +2 -1
  90. webscout/Provider/TTS/speechma.py +500 -100
  91. webscout/Provider/TTS/sthir.py +94 -0
  92. webscout/Provider/TeachAnything.py +3 -7
  93. webscout/Provider/TextPollinationsAI.py +4 -2
  94. webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
  95. webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
  96. webscout/Provider/UNFINISHED/oivscode.py +351 -0
  97. webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
  98. webscout/Provider/Writecream.py +11 -2
  99. webscout/Provider/__init__.py +8 -14
  100. webscout/Provider/ai4chat.py +4 -58
  101. webscout/Provider/asksteve.py +17 -9
  102. webscout/Provider/cerebras.py +3 -1
  103. webscout/Provider/koala.py +170 -268
  104. webscout/Provider/llmchat.py +3 -0
  105. webscout/Provider/lmarena.py +198 -0
  106. webscout/Provider/meta.py +7 -4
  107. webscout/Provider/samurai.py +223 -0
  108. webscout/Provider/scira_chat.py +4 -2
  109. webscout/Provider/typefully.py +23 -151
  110. webscout/__init__.py +4 -2
  111. webscout/cli.py +3 -28
  112. webscout/conversation.py +35 -35
  113. webscout/litagent/Readme.md +276 -0
  114. webscout/scout/README.md +402 -0
  115. webscout/swiftcli/Readme.md +323 -0
  116. webscout/version.py +1 -1
  117. webscout/webscout_search.py +2 -182
  118. webscout/webscout_search_async.py +1 -179
  119. webscout/zeroart/README.md +89 -0
  120. webscout/zeroart/__init__.py +134 -54
  121. webscout/zeroart/base.py +19 -13
  122. webscout/zeroart/effects.py +101 -99
  123. webscout/zeroart/fonts.py +1239 -816
  124. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
  125. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
  126. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
  127. webscout-8.2.8.dist-info/entry_points.txt +3 -0
  128. webscout-8.2.8.dist-info/top_level.txt +1 -0
  129. webscout/Provider/AISEARCH/ISou.py +0 -256
  130. webscout/Provider/ElectronHub.py +0 -773
  131. webscout/Provider/Free2GPT.py +0 -241
  132. webscout/Provider/GPTWeb.py +0 -249
  133. webscout/Provider/bagoodex.py +0 -145
  134. webscout/Provider/geminiprorealtime.py +0 -160
  135. webscout/scout/core.py +0 -881
  136. webscout-8.2.6.dist-info/entry_points.txt +0 -3
  137. webscout-8.2.6.dist-info/top_level.txt +0 -2
  138. webstoken/__init__.py +0 -30
  139. webstoken/classifier.py +0 -189
  140. webstoken/keywords.py +0 -216
  141. webstoken/language.py +0 -128
  142. webstoken/ner.py +0 -164
  143. webstoken/normalizer.py +0 -35
  144. webstoken/processor.py +0 -77
  145. webstoken/sentiment.py +0 -206
  146. webstoken/stemmer.py +0 -73
  147. webstoken/tagger.py +0 -60
  148. webstoken/tokenizer.py +0 -158
  149. /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
  150. {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
@@ -4,30 +4,14 @@ from uuid import uuid4
4
4
 
5
5
  from webscout.AIutel import Optimizers
6
6
  from webscout.AIutel import Conversation
7
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
7
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
8
8
  from webscout.AIbase import Provider
9
9
  from webscout import exceptions
10
10
  from webscout.litagent import LitAgent
11
- # Replace requests with curl_cffi
12
- from curl_cffi.requests import Session # Import Session
13
- from curl_cffi import CurlError # Import CurlError
11
+ from curl_cffi.requests import Session
12
+ from curl_cffi import CurlError
14
13
 
15
14
  class TypefullyAI(Provider):
16
- """
17
- A class to interact with the Typefully AI API.
18
-
19
- Attributes:
20
- system_prompt (str): The system prompt to define the assistant's role.
21
- model (str): The model identifier to use for completions.
22
- output_length (int): Maximum length of the generated output.
23
-
24
- Examples:
25
- >>> from webscout.Provider.typefully import TypefullyAI
26
- >>> ai = TypefullyAI()
27
- >>> response = ai.chat("What's the weather today?")
28
- >>> print(response)
29
- 'The weather today is sunny with a high of 75°F.'
30
- """
31
15
  AVAILABLE_MODELS = ["openai:gpt-4o-mini", "openai:gpt-4o", "anthropic:claude-3-5-haiku-20241022", "groq:llama-3.3-70b-versatile"]
32
16
 
33
17
  def __init__(
@@ -44,28 +28,6 @@ class TypefullyAI(Provider):
44
28
  system_prompt: str = "You're a helpful assistant.",
45
29
  model: str = "openai:gpt-4o-mini",
46
30
  ):
47
- """
48
- Initializes the TypefullyAI API with given parameters.
49
-
50
- Args:
51
- is_conversation (bool): Whether the provider is in conversation mode.
52
- max_tokens (int): Maximum number of tokens to sample.
53
- timeout (int): Timeout for API requests.
54
- intro (str): Introduction message for the conversation.
55
- filepath (str): Filepath for storing conversation history.
56
- update_file (bool): Whether to update the conversation history file.
57
- proxies (dict): Proxies for the API requests.
58
- history_offset (int): Offset for conversation history.
59
- act (str): Act for the conversation.
60
- system_prompt (str): The system prompt to define the assistant's role.
61
- model (str): The model identifier to use.
62
-
63
- Examples:
64
- >>> ai = TypefullyAI(system_prompt="You are a friendly assistant.")
65
- >>> print(ai.system_prompt)
66
- 'You are a friendly assistant.'
67
- """
68
- # Initialize curl_cffi Session
69
31
  self.session = Session()
70
32
  self.is_conversation = is_conversation
71
33
  self.max_tokens_to_sample = max_tokens
@@ -75,10 +37,7 @@ class TypefullyAI(Provider):
75
37
  self.system_prompt = system_prompt
76
38
  self.model = model
77
39
  self.output_length = max_tokens
78
-
79
- # Initialize LitAgent for user agent generation
80
40
  self.agent = LitAgent()
81
-
82
41
  self.headers = {
83
42
  "authority": "typefully.com",
84
43
  "accept": "*/*",
@@ -91,17 +50,15 @@ class TypefullyAI(Provider):
91
50
  "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
92
51
  "sec-ch-ua-mobile": "?0",
93
52
  "sec-ch-ua-platform": '"Windows"',
94
- "user-agent": self.agent.random() # Use LitAgent to generate a random user agent
53
+ "user-agent": self.agent.random()
95
54
  }
96
-
97
55
  self.__available_optimizers = (
98
56
  method
99
57
  for method in dir(Optimizers)
100
58
  if callable(getattr(Optimizers, method)) and not method.startswith("__")
101
59
  )
102
- # Update curl_cffi session headers and proxies
103
60
  self.session.headers.update(self.headers)
104
- self.session.proxies = proxies # Use proxies directly, not session.proxies.update
61
+ self.session.proxies = proxies
105
62
  Conversation.intro = (
106
63
  AwesomePrompts().get_act(
107
64
  act, raise_not_found=True, default=None, case_insensitive=True
@@ -116,13 +73,13 @@ class TypefullyAI(Provider):
116
73
 
117
74
  @staticmethod
118
75
  def _typefully_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
119
- """Extracts content from the Typefully stream format '0:"..."'."""
120
76
  if isinstance(chunk, str):
121
- match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
77
+ if isinstance(chunk, bytes):
78
+ chunk = chunk.decode('utf-8', errors='replace')
79
+ match = re.search(r'0:"(.*?)"', chunk)
122
80
  if match:
123
- # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
124
81
  content = match.group(1).encode().decode('unicode_escape')
125
- return content.replace('\\\\', '\\').replace('\\"', '"')
82
+ return content.replace('\\', '\\').replace('\\"', '"')
126
83
  return None
127
84
 
128
85
  def ask(
@@ -133,25 +90,6 @@ class TypefullyAI(Provider):
133
90
  optimizer: str = None,
134
91
  conversationally: bool = False,
135
92
  ) -> Dict[str, Any]:
136
- """
137
- Sends a prompt to the Typefully AI API and returns the response.
138
-
139
- Args:
140
- prompt (str): The prompt to send to the API.
141
- stream (bool): Whether to stream the response.
142
- raw (bool): Whether to return the raw response.
143
- optimizer (str): Optimizer to use for the prompt.
144
- conversationally (bool): Whether to generate the prompt conversationally.
145
-
146
- Returns:
147
- Dict[str, Any]: The API response.
148
-
149
- Examples:
150
- >>> ai = TypefullyAI()
151
- >>> response = ai.ask("Tell me a joke!")
152
- >>> print(response)
153
- {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
154
- """
155
93
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
156
94
  if optimizer:
157
95
  if optimizer in self.__available_optimizers:
@@ -162,60 +100,49 @@ class TypefullyAI(Provider):
162
100
  raise Exception(
163
101
  f"Optimizer is not one of {self.__available_optimizers}"
164
102
  )
165
-
166
103
  payload = {
167
104
  "prompt": conversation_prompt,
168
105
  "systemPrompt": self.system_prompt,
169
106
  "modelIdentifier": self.model,
170
107
  "outputLength": self.output_length
171
108
  }
172
-
173
109
  def for_stream():
174
- try: # Add try block for CurlError
175
- # Use curl_cffi session post with impersonate
110
+ try:
176
111
  response = self.session.post(
177
112
  self.api_endpoint,
178
113
  headers=self.headers,
179
114
  json=payload,
180
115
  stream=True,
181
116
  timeout=self.timeout,
182
- impersonate="chrome120" # Add impersonate
117
+ impersonate="chrome120"
183
118
  )
184
119
  if not response.ok:
185
120
  raise exceptions.FailedToGenerateResponseError(
186
121
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
187
122
  )
188
123
  streaming_text = ""
189
- # Use sanitize_stream with the custom extractor
190
124
  processed_stream = sanitize_stream(
191
- data=response.iter_content(chunk_size=None), # Pass byte iterator
192
- intro_value=None, # No simple prefix
193
- to_json=False, # Content is not JSON
194
- content_extractor=self._typefully_extractor, # Use the specific extractor
195
- end_marker="e:", # Stop processing if "e:" line is encountered (adjust if needed)
125
+ data=response.iter_content(chunk_size=None),
126
+ intro_value=None,
127
+ to_json=False,
128
+ content_extractor=self._typefully_extractor,
196
129
  )
197
-
198
130
  for content_chunk in processed_stream:
199
131
  if content_chunk and isinstance(content_chunk, str):
200
132
  streaming_text += content_chunk
201
133
  yield content_chunk if raw else dict(text=content_chunk)
202
- # Update history and last response after stream finishes
203
134
  self.last_response.update(dict(text=streaming_text))
204
135
  self.conversation.update_chat_history(
205
136
  prompt, self.get_message(self.last_response)
206
137
  )
207
- except CurlError as e: # Catch CurlError
138
+ except CurlError as e:
208
139
  raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
209
- except Exception as e: # Catch other potential exceptions
140
+ except Exception as e:
210
141
  raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
211
-
212
142
  def for_non_stream():
213
- # This function implicitly uses the updated for_stream
214
143
  for _ in for_stream():
215
144
  pass
216
- # Ensure last_response is updated by for_stream before returning
217
145
  return self.last_response
218
-
219
146
  return for_stream() if stream else for_non_stream()
220
147
 
221
148
  def chat(
@@ -225,31 +152,11 @@ class TypefullyAI(Provider):
225
152
  optimizer: str = None,
226
153
  conversationally: bool = False,
227
154
  ) -> str:
228
- """
229
- Generates a response from the Typefully AI API.
230
-
231
- Args:
232
- prompt (str): The prompt to send to the API.
233
- stream (bool): Whether to stream the response.
234
- optimizer (str): Optimizer to use for the prompt.
235
- conversationally (bool): Whether to generate the prompt conversationally.
236
-
237
- Returns:
238
- str: The API response.
239
-
240
- Examples:
241
- >>> ai = TypefullyAI()
242
- >>> response = ai.chat("What's the weather today?")
243
- >>> print(response)
244
- 'The weather today is sunny with a high of 75°F.'
245
- """
246
-
247
155
  def for_stream():
248
156
  for response in self.ask(
249
157
  prompt, True, optimizer=optimizer, conversationally=conversationally
250
158
  ):
251
159
  yield self.get_message(response)
252
-
253
160
  def for_non_stream():
254
161
  return self.get_message(
255
162
  self.ask(
@@ -259,72 +166,37 @@ class TypefullyAI(Provider):
259
166
  conversationally=conversationally,
260
167
  )
261
168
  )
262
-
263
169
  return for_stream() if stream else for_non_stream()
264
170
 
265
171
  def get_message(self, response: dict) -> str:
266
- """
267
- Extracts the message from the API response.
268
-
269
- Args:
270
- response (dict): The API response.
271
-
272
- Returns:
273
- str: The message content.
274
-
275
- Examples:
276
- >>> ai = TypefullyAI()
277
- >>> response = ai.ask("Tell me a joke!")
278
- >>> message = ai.get_message(response)
279
- >>> print(message)
280
- 'Why did the scarecrow win an award? Because he was outstanding in his field!'
281
- """
282
172
  assert isinstance(response, dict), "Response should be of dict data-type only"
283
- # Handle potential unicode escapes in the final text
284
- # Formatting is now handled by the extractor
285
173
  text = response.get("text", "")
286
174
  try:
287
175
  formatted_text = text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
288
176
  return formatted_text
289
- except Exception: # Catch potential errors during newline replacement
290
- return text # Return original text if formatting fails
291
-
177
+ except Exception:
178
+ return text
292
179
 
293
180
  if __name__ == "__main__":
294
- # Ensure curl_cffi is installed
295
181
  print("-" * 80)
296
182
  print(f"{'Model':<50} {'Status':<10} {'Response'}")
297
183
  print("-" * 80)
298
-
299
- # Test all available models
300
184
  working = 0
301
185
  total = len(TypefullyAI.AVAILABLE_MODELS)
302
-
303
186
  for model in TypefullyAI.AVAILABLE_MODELS:
304
187
  try:
305
188
  test_ai = TypefullyAI(model=model, timeout=60)
306
- # Test stream first
307
189
  response_stream = test_ai.chat("Say 'Hello' in one word", stream=True)
308
190
  response_text = ""
309
- print(f"\r{model:<50} {'Streaming...':<10}", end="", flush=True)
310
191
  for chunk in response_stream:
311
192
  response_text += chunk
312
-
313
193
  if response_text and len(response_text.strip()) > 0:
314
- status = ""
315
- # Clean and truncate response
316
- clean_text = response_text.strip() # Already formatted in get_message
194
+ status = "OK"
195
+ clean_text = response_text.strip()
317
196
  display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
318
197
  else:
319
- status = " (Stream)"
198
+ status = "FAIL (Stream)"
320
199
  display_text = "Empty or invalid stream response"
321
200
  print(f"\r{model:<50} {status:<10} {display_text}")
322
-
323
- # Optional: Add non-stream test if needed
324
- # print(f"\r{model:<50} {'Non-Stream...':<10}", end="", flush=True)
325
- # response_non_stream = test_ai.chat("Say 'Hi' again", stream=False)
326
- # if not response_non_stream or len(response_non_stream.strip()) == 0:
327
- # print(f"\r{model:<50} {'✗ (Non-Stream)':<10} Empty non-stream response")
328
-
329
201
  except Exception as e:
330
- print(f"\r{model:<50} {'':<10} {str(e)}")
202
+ print(f"\r{model:<50} {'FAIL':<10} {str(e)}")
webscout/__init__.py CHANGED
@@ -26,9 +26,11 @@ __repo__ = "https://github.com/OE-LUCIFER/Webscout"
26
26
  # Add update checker
27
27
  from .update_checker import check_for_updates
28
28
  try:
29
- check_for_updates()
29
+ update_message = check_for_updates()
30
+ if update_message:
31
+ print(update_message)
30
32
  except Exception:
31
- pass # Silently handle any update check errors
33
+ pass # Silently handle any update check errors
32
34
 
33
35
  import logging
34
36
  logging.getLogger("webscout").addHandler(logging.NullHandler())
webscout/cli.py CHANGED
@@ -1,6 +1,6 @@
1
1
  import sys
2
2
  from .swiftcli import CLI, option
3
- from .webscout_search import WEBS
3
+ from .webscout_search import WEBS # Import the WEBS class from webscout_search
4
4
  from .DWEBS import GoogleSearch # Import GoogleSearch from DWEBS
5
5
  from .yep_search import YepSearch # Import YepSearch from yep_search
6
6
  from .version import __version__
@@ -48,32 +48,7 @@ def version():
48
48
  """Show the version of webscout."""
49
49
  print(f"webscout version: {__version__}")
50
50
 
51
- @app.command()
52
- @option("--proxy", help="Proxy URL to use for requests")
53
- @option("--model", "-m", help="AI model to use", default="gpt-4o-mini", type=str)
54
- @option("--timeout", "-t", help="Timeout value for requests", type=int, default=10)
55
- def chat(proxy: str = None, model: str = "gpt-4o-mini", timeout: int = 10):
56
- """Interactive AI chat using DuckDuckGo's AI."""
57
- webs = WEBS(proxy=proxy, timeout=timeout)
58
-
59
- print(f"Using model: {model}")
60
- print("Type your message and press Enter. Press Ctrl+C or type 'exit' to quit.\n")
61
-
62
- try:
63
- while True:
64
- try:
65
- user_input = input(">>> ").strip()
66
- if not user_input or user_input.lower() in ['exit', 'quit']:
67
- break
68
-
69
- response = webs.chat(keywords=user_input, model=model)
70
- print(f"\nAI: {response}\n")
71
-
72
- except Exception as e:
73
- print(f"Error: {str(e)}\n")
74
-
75
- except KeyboardInterrupt:
76
- print("\nChat session interrupted. Exiting...")
51
+
77
52
 
78
53
  @app.command()
79
54
  @option("--keywords", "-k", help="Search keywords", required=True)
@@ -546,4 +521,4 @@ def main():
546
521
  sys.exit(1)
547
522
 
548
523
  if __name__ == "__main__":
549
- main()
524
+ main()
webscout/conversation.py CHANGED
@@ -1,6 +1,24 @@
1
+ """
2
+ conversation.py
3
+
4
+ This module provides a modern conversation manager for handling chat-based interactions, message history, tool calls, and robust error handling. It defines the Conversation class and supporting types for managing conversational state, tool integration, and message validation.
5
+
6
+ Classes:
7
+ ConversationError: Base exception for conversation-related errors.
8
+ ToolCallError: Raised when there's an error with tool calls.
9
+ MessageValidationError: Raised when message validation fails.
10
+ Message: Represents a single message in the conversation.
11
+ FunctionCall: TypedDict for a function call.
12
+ ToolDefinition: TypedDict for a tool definition.
13
+ FunctionCallData: TypedDict for function call data.
14
+ Fn: Represents a function (tool) that the agent can call.
15
+ Conversation: Main conversation manager class.
16
+
17
+ Functions:
18
+ tools: Decorator to mark a function as a tool.
19
+ """
1
20
  import os
2
21
  import json
3
- import logging
4
22
  from typing import Optional, Dict, List, Any, TypedDict, Callable, TypeVar, Union
5
23
  from dataclasses import dataclass
6
24
  from datetime import datetime
@@ -59,14 +77,15 @@ def tools(func: Callable[..., T]) -> Callable[..., T]:
59
77
  return func
60
78
 
61
79
  class Conversation:
62
- """Modern conversation manager with enhanced features.
63
-
80
+ """
81
+ Modern conversation manager with enhanced features.
82
+
64
83
  Key Features:
65
- - Robust message handling with metadata
66
- - Enhanced tool calling support
67
- - Efficient history management
68
- - Improved error handling
69
- - Memory optimization
84
+ - Robust message handling with metadata
85
+ - Enhanced tool calling support
86
+ - Efficient history management
87
+ - Improved error handling
88
+ - Memory optimization
70
89
  """
71
90
 
72
91
  intro = (
@@ -95,23 +114,8 @@ class Conversation:
95
114
  self.prompt_allowance = 10
96
115
  self.tools = tools or []
97
116
  self.compression_threshold = compression_threshold
98
- self.logger = self._setup_logger()
99
-
100
117
  if filepath:
101
- self.load_conversation(filepath, False)
102
-
103
- def _setup_logger(self) -> logging.Logger:
104
- """Set up enhanced logging."""
105
- logger = logging.getLogger("conversation")
106
- if not logger.handlers:
107
- handler = logging.StreamHandler()
108
- formatter = logging.Formatter(
109
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
110
- )
111
- handler.setFormatter(formatter)
112
- logger.addHandler(handler)
113
- logger.setLevel(logging.INFO)
114
- return logger
118
+ self.load_conversation(filepath, True)
115
119
 
116
120
  def load_conversation(self, filepath: str, exists: bool = True) -> None:
117
121
  """Load conversation with improved error handling."""
@@ -132,7 +136,6 @@ class Conversation:
132
136
  self.intro = file_contents[0]
133
137
  self._process_history_from_file(file_contents[1:])
134
138
  except Exception as e:
135
- self.logger.error(f"Error loading conversation: {str(e)}")
136
139
  raise ConversationError(f"Failed to load conversation: {str(e)}") from e
137
140
 
138
141
  def _process_history_from_file(self, lines: List[str]) -> None:
@@ -272,6 +275,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
272
275
  def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
273
276
  """Add a message with enhanced validation and metadata support."""
274
277
  try:
278
+ role = role.lower() # Normalize role to lowercase
275
279
  if not self.validate_message(role, content):
276
280
  raise MessageValidationError("Invalid message role or content")
277
281
 
@@ -284,7 +288,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
284
288
  self._compress_history()
285
289
 
286
290
  except Exception as e:
287
- self.logger.error(f"Error adding message: {str(e)}")
288
291
  raise ConversationError(f"Failed to add message: {str(e)}") from e
289
292
 
290
293
  def _append_to_file(self, message: Message) -> None:
@@ -299,17 +302,17 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
299
302
  fh.write(f"\n{role_display}: {message.content}")
300
303
 
301
304
  except Exception as e:
302
- self.logger.error(f"Error writing to file: {str(e)}")
303
305
  raise ConversationError(f"Failed to write to file: {str(e)}") from e
304
306
 
305
307
  def validate_message(self, role: str, content: str) -> bool:
306
308
  """Validate message with enhanced role checking."""
307
309
  valid_roles = {'user', 'assistant', 'tool', 'system'}
308
310
  if role not in valid_roles:
309
- self.logger.error(f"Invalid role: {role}")
310
311
  return False
311
- if not content or not isinstance(content, str):
312
- self.logger.error("Invalid content")
312
+ if not isinstance(content, str):
313
+ return False
314
+ # Allow empty content for assistant (needed for streaming)
315
+ if not content and role != 'assistant':
313
316
  return False
314
317
  return True
315
318
 
@@ -345,7 +348,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
345
348
  }
346
349
 
347
350
  except Exception as e:
348
- self.logger.error(f"Error handling tool response: {str(e)}")
349
351
  raise ToolCallError(f"Failed to handle tool response: {str(e)}") from e
350
352
 
351
353
  def _parse_function_call(self, response: str) -> FunctionCallData:
@@ -381,7 +383,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
381
383
  raise
382
384
 
383
385
  except Exception as e:
384
- self.logger.error(f"Error parsing function call: {str(e)}")
385
386
  return {"error": str(e)}
386
387
 
387
388
  def execute_function(self, function_call_data: FunctionCallData) -> str:
@@ -405,7 +406,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
405
406
  return "; ".join(results)
406
407
 
407
408
  except Exception as e:
408
- self.logger.error(f"Error executing function: {str(e)}")
409
409
  raise ToolCallError(f"Failed to execute function: {str(e)}") from e
410
410
 
411
411
  def get_tools_description(self) -> str:
@@ -428,9 +428,9 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
428
428
  This method adds both the user's prompt and the assistant's response
429
429
  to the conversation history as separate messages.
430
430
  """
431
- # Add user's message
431
+ # Add user's message (normalize role)
432
432
  self.add_message("user", prompt)
433
433
 
434
- # Add assistant's response
434
+ # Add assistant's response (normalize role)
435
435
  self.add_message("assistant", response)
436
436