webscout 7.1-py3-none-any.whl → 7.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (154)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +23 -0
  24. webscout/Litlogger/core/logger.py +166 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +33 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +173 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +249 -0
  31. webscout/Litlogger/styles/formats.py +460 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/AISEARCH/ISou.py +277 -0
  38. webscout/Provider/AISEARCH/__init__.py +2 -1
  39. webscout/Provider/Blackboxai.py +3 -3
  40. webscout/Provider/ChatGPTGratis.py +226 -0
  41. webscout/Provider/Cloudflare.py +3 -4
  42. webscout/Provider/DeepSeek.py +218 -0
  43. webscout/Provider/Deepinfra.py +40 -24
  44. webscout/Provider/Free2GPT.py +131 -124
  45. webscout/Provider/Gemini.py +100 -115
  46. webscout/Provider/Glider.py +3 -3
  47. webscout/Provider/Groq.py +5 -1
  48. webscout/Provider/Jadve.py +3 -3
  49. webscout/Provider/Marcus.py +191 -192
  50. webscout/Provider/Netwrck.py +3 -3
  51. webscout/Provider/PI.py +2 -2
  52. webscout/Provider/PizzaGPT.py +2 -3
  53. webscout/Provider/QwenLM.py +311 -0
  54. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  55. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  56. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  57. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  58. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
  59. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
  60. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  61. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  62. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  63. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  64. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  65. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  66. webscout/Provider/TTI/__init__.py +2 -1
  67. webscout/Provider/TTI/artbit/__init__.py +22 -22
  68. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  69. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  70. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  71. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  72. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  73. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  74. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  75. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  76. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  77. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  78. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  79. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  80. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  81. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  82. webscout/Provider/TTI/talkai/__init__.py +4 -4
  83. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  84. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  85. webscout/Provider/TTS/deepgram.py +182 -182
  86. webscout/Provider/TTS/elevenlabs.py +136 -136
  87. webscout/Provider/TTS/gesserit.py +150 -150
  88. webscout/Provider/TTS/murfai.py +138 -138
  89. webscout/Provider/TTS/parler.py +133 -134
  90. webscout/Provider/TTS/streamElements.py +360 -360
  91. webscout/Provider/TTS/utils.py +280 -280
  92. webscout/Provider/TTS/voicepod.py +116 -116
  93. webscout/Provider/TextPollinationsAI.py +28 -8
  94. webscout/Provider/WiseCat.py +193 -0
  95. webscout/Provider/__init__.py +146 -134
  96. webscout/Provider/cerebras.py +242 -227
  97. webscout/Provider/chatglm.py +204 -204
  98. webscout/Provider/dgaf.py +2 -3
  99. webscout/Provider/freeaichat.py +221 -0
  100. webscout/Provider/gaurish.py +2 -3
  101. webscout/Provider/geminiapi.py +208 -208
  102. webscout/Provider/granite.py +223 -0
  103. webscout/Provider/hermes.py +218 -218
  104. webscout/Provider/llama3mitril.py +179 -179
  105. webscout/Provider/llamatutor.py +3 -3
  106. webscout/Provider/llmchat.py +2 -3
  107. webscout/Provider/meta.py +794 -794
  108. webscout/Provider/multichat.py +331 -331
  109. webscout/Provider/typegpt.py +359 -359
  110. webscout/Provider/yep.py +3 -3
  111. webscout/__init__.py +1 -0
  112. webscout/__main__.py +5 -5
  113. webscout/cli.py +319 -319
  114. webscout/conversation.py +241 -242
  115. webscout/exceptions.py +328 -328
  116. webscout/litagent/__init__.py +28 -28
  117. webscout/litagent/agent.py +2 -3
  118. webscout/litprinter/__init__.py +0 -58
  119. webscout/scout/__init__.py +8 -8
  120. webscout/scout/core.py +884 -884
  121. webscout/scout/element.py +459 -459
  122. webscout/scout/parsers/__init__.py +69 -69
  123. webscout/scout/parsers/html5lib_parser.py +172 -172
  124. webscout/scout/parsers/html_parser.py +236 -236
  125. webscout/scout/parsers/lxml_parser.py +178 -178
  126. webscout/scout/utils.py +38 -38
  127. webscout/swiftcli/__init__.py +811 -811
  128. webscout/update_checker.py +2 -12
  129. webscout/version.py +1 -1
  130. webscout/webscout_search.py +87 -6
  131. webscout/webscout_search_async.py +58 -1
  132. webscout/yep_search.py +297 -0
  133. webscout/zeroart/__init__.py +54 -54
  134. webscout/zeroart/base.py +60 -60
  135. webscout/zeroart/effects.py +99 -99
  136. webscout/zeroart/fonts.py +816 -816
  137. {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
  138. webscout-7.3.dist-info/RECORD +223 -0
  139. {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
  140. webstoken/__init__.py +30 -30
  141. webstoken/classifier.py +189 -189
  142. webstoken/keywords.py +216 -216
  143. webstoken/language.py +128 -128
  144. webstoken/ner.py +164 -164
  145. webstoken/normalizer.py +35 -35
  146. webstoken/processor.py +77 -77
  147. webstoken/sentiment.py +206 -206
  148. webstoken/stemmer.py +73 -73
  149. webstoken/tagger.py +60 -60
  150. webstoken/tokenizer.py +158 -158
  151. webscout-7.1.dist-info/RECORD +0 -198
  152. {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
  153. {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
  154. {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
@@ -1,219 +1,219 @@
- import requests
- import json
- from typing import Any, Dict, Generator, Optional
-
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
- from webscout import exceptions
-
- class NousHermes(Provider):
-     """
-     A class to interact with the Hermes API.
-     """
-
-     AVAILABLE_MODELS = ["Hermes-3-Llama-3.1-70B", "Hermes-3-Llama-3.1-8B"]
-
-     def __init__(
-         self,
-         cookies_path: str,
-         is_conversation: bool = True,
-         max_tokens: int = 8000,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "Hermes-3-Llama-3.1-70B",
-         system_prompt: str = "You are a helpful AI assistant.",
-         temperature: float = 0.7,
-         top_p: float = 0.9,
-     ):
-         """Initializes the Hermes API client."""
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(
-                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
-             )
-
-         self.session = requests.Session()
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.system_prompt = system_prompt
-         self.api_endpoint = "https://hermes.nousresearch.com/api/chat"
-         self.temperature = temperature
-         self.top_p = top_p
-         self.cookies_path = cookies_path
-         self.cookies = self._load_cookies()
-         self.headers = {
-             'accept': '*/*',
-             'accept-language': 'en-US,en;q=0.9',
-             'content-type': 'application/json',
-             'origin': 'https://hermes.nousresearch.com',
-             'referer': 'https://hermes.nousresearch.com/',
-             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
-             'cookie': self.cookies
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-     def _load_cookies(self) -> Optional[str]:
-         """Load cookies from a JSON file and convert them to a string."""
-         try:
-             with open(self.cookies_path, 'r') as f:
-                 cookies_data = json.load(f)
-                 return '; '.join([f"{cookie['name']}={cookie['value']}" for cookie in cookies_data])
-         except FileNotFoundError:
-             print("Error: cookies.json file not found!")
-             return None
-         except json.JSONDecodeError:
-             print("Error: Invalid JSON format in cookies.json!")
-             return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
-         """Chat with AI
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict|AsyncGenerator : ai content
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "messages": [{"role": "system", "content": self.system_prompt}, {"role": "user", "content": conversation_prompt}],
-             "model": self.model,
-             "max_tokens": self.max_tokens_to_sample,
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-         }
-         def for_stream():
-             response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
-             if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 )
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     decoded_line = line.decode('utf-8').replace('data: ', '')
-                     try:
-                         data = json.loads(decoded_line)
-                         if data['type'] == 'llm_response':
-                             content = data['content']
-                             full_response += content
-                             yield content if raw else dict(text=content)
-                     except json.JSONDecodeError:
-                         continue
-             self.last_response.update(dict(text=full_response))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-
-         def for_non_stream():
-             for _ in for_stream():
-                 pass
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str | Generator[str, None, None]:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         def for_stream():
-             for response in self.ask(
-                 prompt, True, optimizer=optimizer, conversationally=conversationally
-             ):
-                 yield self.get_message(response)
-
-         def for_non_stream():
-             return self.get_message(
-                 self.ask(
-                     prompt,
-                     False,
-                     optimizer=optimizer,
-                     conversationally=conversationally,
-                 )
-             )
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
-
-
- if __name__ == "__main__":
-     from rich import print
-     ai = NousHermes(cookies_path="cookies.json")
-     response = ai.chat(input(">>> "), stream=True)
-     for chunk in response:
+ import requests
+ import json
+ from typing import Any, Dict, Generator, Optional
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class NousHermes(Provider):
+     """
+     A class to interact with the Hermes API.
+     """
+
+     AVAILABLE_MODELS = ["Hermes-3-Llama-3.1-70B", "Hermes-3-Llama-3.1-8B"]
+
+     def __init__(
+         self,
+         cookies_path: str,
+         is_conversation: bool = True,
+         max_tokens: int = 8000,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "Hermes-3-Llama-3.1-70B",
+         system_prompt: str = "You are a helpful AI assistant.",
+         temperature: float = 0.7,
+         top_p: float = 0.9,
+     ):
+         """Initializes the Hermes API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+             )
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.api_endpoint = "https://hermes.nousresearch.com/api/chat"
+         self.temperature = temperature
+         self.top_p = top_p
+         self.cookies_path = cookies_path
+         self.cookies = self._load_cookies()
+         self.headers = {
+             'accept': '*/*',
+             'accept-language': 'en-US,en;q=0.9',
+             'content-type': 'application/json',
+             'origin': 'https://hermes.nousresearch.com',
+             'referer': 'https://hermes.nousresearch.com/',
+             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
+             'cookie': self.cookies
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def _load_cookies(self) -> Optional[str]:
+         """Load cookies from a JSON file and convert them to a string."""
+         try:
+             with open(self.cookies_path, 'r') as f:
+                 cookies_data = json.load(f)
+                 return '; '.join([f"{cookie['name']}={cookie['value']}" for cookie in cookies_data])
+         except FileNotFoundError:
+             print("Error: cookies.json file not found!")
+             return None
+         except json.JSONDecodeError:
+             print("Error: Invalid JSON format in cookies.json!")
+             return None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+         """Chat with AI
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict|AsyncGenerator : ai content
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "messages": [{"role": "system", "content": self.system_prompt}, {"role": "user", "content": conversation_prompt}],
+             "model": self.model,
+             "max_tokens": self.max_tokens_to_sample,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+         }
+         def for_stream():
+             response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+             full_response = ""
+             for line in response.iter_lines():
+                 if line:
+                     decoded_line = line.decode('utf-8').replace('data: ', '')
+                     try:
+                         data = json.loads(decoded_line)
+                         if data['type'] == 'llm_response':
+                             content = data['content']
+                             full_response += content
+                             yield content if raw else dict(text=content)
+                     except json.JSONDecodeError:
+                         continue
+             self.last_response.update(dict(text=full_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | Generator[str, None, None]:
+         """Generate response `str`
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = NousHermes(cookies_path="cookies.json")
+     response = ai.chat(input(">>> "), stream=True)
+     for chunk in response:
          print(chunk, end="", flush=True)
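
The hunk above appears to correspond to webscout/Provider/hermes.py from the file list (+218 -218); it defines the NousHermes provider. A minimal usage sketch follows, assuming a cookies.json exported from a logged-in hermes.nousresearch.com session and the import path implied by the file layout; neither of these assumptions is asserted by this diff.

    # Usage sketch only; import path inferred from webscout/Provider/hermes.py above
    from webscout.Provider.hermes import NousHermes

    # cookies.json must hold the browser cookies for hermes.nousresearch.com
    # (list of {"name": ..., "value": ...} objects, per _load_cookies above)
    ai = NousHermes(cookies_path="cookies.json", model="Hermes-3-Llama-3.1-8B")

    # chat(..., stream=True) yields plain-text chunks, per the ask()/chat() methods shown above
    for chunk in ai.chat("Hello there", stream=True):
        print(chunk, end="", flush=True)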