webscout: 6.9-py3-none-any.whl → 7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (51)
  1. webscout/AIbase.py +12 -2
  2. webscout/DWEBS.py +38 -22
  3. webscout/Extra/autocoder/autocoder_utiles.py +68 -7
  4. webscout/Extra/autollama.py +0 -16
  5. webscout/Extra/gguf.py +0 -13
  6. webscout/LLM.py +1 -1
  7. webscout/Provider/AISEARCH/DeepFind.py +251 -0
  8. webscout/Provider/AISEARCH/__init__.py +2 -2
  9. webscout/Provider/AISEARCH/felo_search.py +167 -118
  10. webscout/Provider/Blackboxai.py +136 -137
  11. webscout/Provider/Cloudflare.py +92 -78
  12. webscout/Provider/Deepinfra.py +59 -35
  13. webscout/Provider/Glider.py +222 -0
  14. webscout/Provider/Groq.py +26 -18
  15. webscout/Provider/HF_space/__init__.py +0 -0
  16. webscout/Provider/HF_space/qwen_qwen2.py +206 -0
  17. webscout/Provider/Jadve.py +108 -77
  18. webscout/Provider/Llama3.py +117 -94
  19. webscout/Provider/Marcus.py +65 -10
  20. webscout/Provider/Netwrck.py +61 -49
  21. webscout/Provider/PI.py +77 -122
  22. webscout/Provider/PizzaGPT.py +129 -82
  23. webscout/Provider/TextPollinationsAI.py +229 -0
  24. webscout/Provider/Youchat.py +28 -22
  25. webscout/Provider/__init__.py +12 -4
  26. webscout/Provider/askmyai.py +2 -2
  27. webscout/Provider/chatglm.py +205 -0
  28. webscout/Provider/dgaf.py +215 -0
  29. webscout/Provider/gaurish.py +106 -66
  30. webscout/Provider/hermes.py +219 -0
  31. webscout/Provider/llamatutor.py +72 -62
  32. webscout/Provider/llmchat.py +62 -35
  33. webscout/Provider/meta.py +6 -6
  34. webscout/Provider/multichat.py +205 -104
  35. webscout/Provider/typegpt.py +26 -23
  36. webscout/Provider/yep.py +3 -3
  37. webscout/litagent/__init__.py +3 -146
  38. webscout/litagent/agent.py +120 -0
  39. webscout/litagent/constants.py +31 -0
  40. webscout/tempid.py +0 -4
  41. webscout/version.py +1 -1
  42. webscout/webscout_search.py +1141 -1140
  43. webscout/webscout_search_async.py +635 -635
  44. {webscout-6.9.dist-info → webscout-7.1.dist-info}/METADATA +37 -33
  45. {webscout-6.9.dist-info → webscout-7.1.dist-info}/RECORD +49 -41
  46. {webscout-6.9.dist-info → webscout-7.1.dist-info}/WHEEL +1 -1
  47. webscout/Provider/AISEARCH/ooai.py +0 -155
  48. webscout/Provider/RUBIKSAI.py +0 -272
  49. {webscout-6.9.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
  50. {webscout-6.9.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
  51. {webscout-6.9.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0

webscout/Provider/TextPollinationsAI.py (new file)
@@ -0,0 +1,229 @@
+
+import requests
+import json
+from typing import Any, Dict, Generator
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout import LitAgent as Lit
+class TextPollinationsAI(Provider):
+    """
+    A class to interact with the Pollinations AI API with comprehensive logging.
+    """
+
+    AVAILABLE_MODELS = [
+        "openai", "openai-large", "qwen", "qwen-coder", "llama", "mistral",
+        "unity", "midijourney", "rtist", "searchgpt", "evil", "deepseek",
+        "claude-hybridspace", "deepseek-r1", "llamalight", "llamaguard",
+        "gemini", "gemini-thinking", "hormoz"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 8096,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "openai-large",
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """Initializes the TextPollinationsAI API client with logging capabilities."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.logger = LitLogger(
+            name="TextPollinationsAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing TextPollinationsAI with model: {model}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://text.pollinations.ai/openai"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'User-Agent': Lit().random(),
+            'Content-Type': 'application/json',
+        }
+
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if self.logger:
+            self.logger.info("TextPollinationsAI initialized successfully")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Chat with AI with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "model": self.model,
+            "stream": stream,
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
+
+            response = self.session.post(
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
+            )
+
+            if not response.ok:
+                if self.logger:
+                    self.logger.error(f"API request failed. Status: {response.status_code}, Reason: {response.reason}")
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            if self.logger:
+                self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8').strip()
+                    if line == "data: [DONE]":
+                        if self.logger:
+                            self.logger.debug("Stream completed")
+                        break
+                    if line.startswith('data: '):
+                        try:
+                            json_data = json.loads(line[6:])
+                            if 'choices' in json_data and len(json_data['choices']) > 0:
+                                choice = json_data['choices'][0]
+                                if 'delta' in choice and 'content' in choice['delta']:
+                                    content = choice['delta']['content']
+                                else:
+                                    content = ""
+                                full_response += content
+                                yield content if raw else dict(text=content)
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.error(f"JSON parsing error: {str(e)}")
+                            continue
+
+            self.last_response.update(dict(text=full_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
+        def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response as a string with logging"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+    # Enable logging for testing
+    ai = TextPollinationsAI(model="deepseek-r1", logging=True)
+    response = ai.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
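
For orientation (not part of the diff itself), a minimal usage sketch of the provider added above; it assumes the class is re-exported from `webscout.Provider`, as the `__init__.py` hunk later in this diff does, and uses only the constructor parameters and `chat()` signature shown above:

```python
# Illustrative sketch only -- exercises the new TextPollinationsAI provider.
from webscout.Provider import TextPollinationsAI

ai = TextPollinationsAI(model="openai-large", logging=False)  # model must be one of AVAILABLE_MODELS

# Non-streaming: chat() returns the full reply as a single string.
print(ai.chat("Hello"))

# Streaming: chat(stream=True) yields string chunks as they arrive.
for chunk in ai.chat("Tell me a short joke", stream=True):
    print(chunk, end="", flush=True)
```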

webscout/Provider/Youchat.py
@@ -16,31 +16,33 @@ class YouChat(Provider):
     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
     """
 
+    # Updated available models based on provided "aiModels" list
     AVAILABLE_MODELS = [
+        "openai_o3_mini_high",
+        "openai_o3_mini_medium",
         "openai_o1",
+        "openai_o1_preview",
         "openai_o1_mini",
         "gpt_4o_mini",
         "gpt_4o",
         "gpt_4_turbo",
         "gpt_4",
+        "grok_2",
         "claude_3_5_sonnet",
         "claude_3_opus",
         "claude_3_sonnet",
         "claude_3_5_haiku",
-        "claude_3_haiku",
+        "deepseek_r1",
+        "deepseek_v3",
         "llama3_3_70b",
         "llama3_2_90b",
-        "llama3_2_11b",
         "llama3_1_405b",
-        "llama3_1_70b",
-        "llama3",
         "mistral_large_2",
         "gemini_1_5_flash",
         "gemini_1_5_pro",
         "databricks_dbrx_instruct",
         "qwen2p5_72b",
         "qwen2p5_coder_32b",
-        "command_r",
         "command_r_plus",
         "solar_1_mini",
         "dolphin_2_5"
@@ -156,22 +158,25 @@ class YouChat(Provider):
                 )
 
         payload = {
-            "q": conversation_prompt,
-            "page": 2,
-            "count": 20,
-            "safeSearch": "Moderate",
-            "mkt": "en-IN",
-            "domain": "youchat",
-            "use_personalization_extraction": "false",
-            "queryTraceId": str(uuid4()),
-            "chatId": str(uuid4()),
-            "conversationTurnId": str(uuid4()),
-            "pastChatLength": 0,
-            "isSmallMediumDevice": "true",
-            "selectedChatMode": self.model,  # Use the selected model
-            "traceId": str(uuid4()),
-            "chat": "[]"
-        }
+            "q": conversation_prompt,
+            "page": 1,
+            "count": 10,
+            "safeSearch": "Moderate",
+            "mkt": "en-IN",
+            "enable_workflow_generation_ux": "true",
+            "domain": "youchat",
+            "use_personalization_extraction": "false",
+            "enable_agent_clarification_questions": "true",
+            "queryTraceId": str(uuid4()),
+            "chatId": str(uuid4()),
+            "conversationTurnId": str(uuid4()),
+            "pastChatLength": 0,
+            "isSmallMediumDevice": "true",
+            "selectedChatMode": self.model,
+            "use_nested_youchat_updates": "true",
+            "traceId": str(uuid4()),
+            "chat": "[]"
+        }
 
         def for_stream():
             response = self.session.get(
@@ -255,9 +260,10 @@ class YouChat(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
 if __name__ == '__main__':
     from rich import print
     ai = YouChat(timeout=5000)
     response = ai.chat("hi", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)

webscout/Provider/__init__.py
@@ -22,7 +22,6 @@ from .PizzaGPT import *
 from .Llama3 import *
 from .DARKAI import *
 from .koala import *
-from .RUBIKSAI import *
 from .meta import *
 from .DiscordRocks import *
 from .julius import *
@@ -60,8 +59,16 @@ from .Marcus import *
 from .typegpt import *
 from .multichat import *
 from .Jadve import *
+from .chatglm import *
+from .hermes import *
+from .TextPollinationsAI import *
+from .Glider import *
+from .dgaf import *
 __all__ = [
-    'LLAMA',
+    'LLAMA',
+    'DGAFAI',
+    'TextPollinationsAI',
+    'GliderAI',
     'Cohere',
     'REKA',
     'GROQ',
@@ -79,10 +86,9 @@ __all__ = [
     'OLLAMA',
     'AndiSearch',
     'PIZZAGPT',
-    'LLAMA3',
+    'Sambanova',
     'DARKAI',
     'KOALA',
-    'RUBIKSAI',
     'Meta',
     'AskMyAI',
     'DiscordRocks',
@@ -123,4 +129,6 @@ __all__ = [
     'Netwrck',
     'MultiChatAI',
     'JadveOpenAI',
+    'ChatGLM',
+    'NousHermes',
 ]
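
Taken together, the `__init__.py` hunks above register the new providers as top-level exports. A small sanity-check sketch (assuming each newly imported module defines the class named in `__all__`):

```python
# Illustrative sketch only -- the new provider classes are importable from the package root,
# while RUBIKSAI (removed above) no longer is.
from webscout.Provider import TextPollinationsAI, GliderAI, DGAFAI, ChatGLM, NousHermes

for provider in (TextPollinationsAI, GliderAI, DGAFAI, ChatGLM, NousHermes):
    print(provider.__name__)
```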

webscout/Provider/askmyai.py
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from fake_useragent import UserAgent
+from webscout import LitAgent as UserAgent
 
 class AskMyAI(Provider):
     """
@@ -41,7 +41,7 @@ class AskMyAI(Provider):
             "Accept": "*/*",
             "Accept-Encoding": "gzip, deflate, br",
             "Accept-Language": "en-US,en;q=0.9",
-            'user-agent': UserAgent().random
+            'user-agent': UserAgent().random()
         }
         self.__available_optimizers = (
             method
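
This `askmyai.py` change swaps `fake_useragent` for webscout's bundled `LitAgent` (kept under the same `UserAgent` alias); since `LitAgent.random` is called as a method elsewhere in this release (e.g. `Lit().random()` in `TextPollinationsAI`), the attribute access becomes a call. A minimal before/after sketch:

```python
# Before: fake_useragent exposes `random` as a property.
# from fake_useragent import UserAgent
# ua = UserAgent().random

# After: webscout's LitAgent exposes random() as a method returning a User-Agent string.
from webscout import LitAgent as UserAgent
ua = UserAgent().random()
```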

webscout/Provider/chatglm.py (new file)
@@ -0,0 +1,205 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator, List, Union
+import uuid
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class ChatGLM(Provider):
+    """
+    A class to interact with the ChatGLM API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "all-tools-230b",
+    ):
+        """Initializes the ChatGLM API client."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            'Accept-Language': 'en-US,en;q=0.9',
+            'App-Name': 'chatglm',
+            'Authorization': 'undefined',
+            'Content-Type': 'application/json',
+            'Origin': 'https://chatglm.cn',
+            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+            'X-App-Platform': 'pc',
+            'X-App-Version': '0.0.1',
+            'X-Device-Id': '',  # Will be generated each time
+            'Accept': 'text/event-stream',
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Chat with AI
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            Union[Dict, Generator[Dict, None, None]]: Response generated
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+        device_id = str(uuid.uuid4()).replace('-', '')
+        self.session.headers.update({'X-Device-Id': device_id})
+        payload = {
+            "assistant_id": "65940acff94777010aa6b796",
+            "conversation_id": "",
+            "meta_data": {
+                "if_plus_model": False,
+                "is_test": False,
+                "input_question_type": "xxxx",
+                "channel": "",
+                "draft_id": "",
+                "quote_log_id": "",
+                "platform": "pc",
+            },
+            "messages": [
+                {
+                    "role": "user",
+                    "content": [{"type": "text", "text": conversation_prompt}],
+                }
+            ],
+        }
+
+        def for_stream():
+            try:
+                with self.session.post(
+                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+                ) as response:
+                    response.raise_for_status()
+
+                    streaming_text = ""
+                    last_processed_content = ""  # Track the last processed content
+                    for chunk in response.iter_lines():
+                        if chunk:
+                            decoded_chunk = chunk.decode('utf-8')
+                            if decoded_chunk.startswith('data: '):
+                                try:
+                                    json_data = json.loads(decoded_chunk[6:])
+                                    parts = json_data.get('parts', [])
+                                    if parts:
+                                        content = parts[0].get('content', [])
+                                        if content:
+                                            text = content[0].get('text', '')
+                                            new_text = text[len(last_processed_content):]
+                                            if new_text:  # Check for new content
+                                                streaming_text += new_text
+                                                last_processed_content = text
+                                                yield new_text if raw else dict(text=new_text)
+                                except json.JSONDecodeError:
+                                    continue
+
+                    self.last_response.update(dict(text=streaming_text))
+                    self.conversation.update_chat_history(
+                        prompt, self.get_message(self.last_response)
+                    )
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+            except json.JSONDecodeError as e:
+                raise exceptions.InvalidResponseError(f"Failed to decode JSON: {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response `str`"""
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = ChatGLM()
+    response = ai.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)