webscout-7.1-py3-none-any.whl → webscout-7.2-py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (144)
  1. webscout/AIauto.py +191 -191
  2. webscout/AIbase.py +122 -122
  3. webscout/AIutel.py +440 -440
  4. webscout/Bard.py +343 -161
  5. webscout/DWEBS.py +489 -492
  6. webscout/Extra/YTToolkit/YTdownloader.py +995 -995
  7. webscout/Extra/YTToolkit/__init__.py +2 -2
  8. webscout/Extra/YTToolkit/transcriber.py +476 -479
  9. webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
  10. webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
  11. webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
  12. webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
  13. webscout/Extra/YTToolkit/ytapi/video.py +103 -103
  14. webscout/Extra/autocoder/__init__.py +9 -9
  15. webscout/Extra/autocoder/autocoder_utiles.py +199 -199
  16. webscout/Extra/autocoder/rawdog.py +5 -7
  17. webscout/Extra/autollama.py +230 -230
  18. webscout/Extra/gguf.py +3 -3
  19. webscout/Extra/weather.py +171 -171
  20. webscout/LLM.py +442 -442
  21. webscout/Litlogger/__init__.py +67 -681
  22. webscout/Litlogger/core/__init__.py +6 -0
  23. webscout/Litlogger/core/level.py +20 -0
  24. webscout/Litlogger/core/logger.py +123 -0
  25. webscout/Litlogger/handlers/__init__.py +12 -0
  26. webscout/Litlogger/handlers/console.py +50 -0
  27. webscout/Litlogger/handlers/file.py +143 -0
  28. webscout/Litlogger/handlers/network.py +174 -0
  29. webscout/Litlogger/styles/__init__.py +7 -0
  30. webscout/Litlogger/styles/colors.py +231 -0
  31. webscout/Litlogger/styles/formats.py +377 -0
  32. webscout/Litlogger/styles/text.py +87 -0
  33. webscout/Litlogger/utils/__init__.py +6 -0
  34. webscout/Litlogger/utils/detectors.py +154 -0
  35. webscout/Litlogger/utils/formatters.py +200 -0
  36. webscout/Provider/AISEARCH/DeepFind.py +250 -250
  37. webscout/Provider/Blackboxai.py +3 -3
  38. webscout/Provider/ChatGPTGratis.py +226 -0
  39. webscout/Provider/Cloudflare.py +3 -4
  40. webscout/Provider/DeepSeek.py +218 -0
  41. webscout/Provider/Deepinfra.py +3 -3
  42. webscout/Provider/Free2GPT.py +131 -124
  43. webscout/Provider/Gemini.py +100 -115
  44. webscout/Provider/Glider.py +3 -3
  45. webscout/Provider/Groq.py +5 -1
  46. webscout/Provider/Jadve.py +3 -3
  47. webscout/Provider/Marcus.py +191 -192
  48. webscout/Provider/Netwrck.py +3 -3
  49. webscout/Provider/PI.py +2 -2
  50. webscout/Provider/PizzaGPT.py +2 -3
  51. webscout/Provider/QwenLM.py +311 -0
  52. webscout/Provider/TTI/AiForce/__init__.py +22 -22
  53. webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
  54. webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
  55. webscout/Provider/TTI/Nexra/__init__.py +22 -22
  56. webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
  57. webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
  58. webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
  59. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
  60. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
  61. webscout/Provider/TTI/artbit/__init__.py +22 -22
  62. webscout/Provider/TTI/artbit/async_artbit.py +184 -184
  63. webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
  64. webscout/Provider/TTI/blackbox/__init__.py +4 -4
  65. webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
  66. webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
  67. webscout/Provider/TTI/deepinfra/__init__.py +4 -4
  68. webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
  69. webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
  70. webscout/Provider/TTI/huggingface/__init__.py +22 -22
  71. webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
  72. webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
  73. webscout/Provider/TTI/imgninza/__init__.py +4 -4
  74. webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
  75. webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
  76. webscout/Provider/TTI/talkai/__init__.py +4 -4
  77. webscout/Provider/TTI/talkai/async_talkai.py +229 -229
  78. webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
  79. webscout/Provider/TTS/deepgram.py +182 -182
  80. webscout/Provider/TTS/elevenlabs.py +136 -136
  81. webscout/Provider/TTS/gesserit.py +150 -150
  82. webscout/Provider/TTS/murfai.py +138 -138
  83. webscout/Provider/TTS/parler.py +133 -134
  84. webscout/Provider/TTS/streamElements.py +360 -360
  85. webscout/Provider/TTS/utils.py +280 -280
  86. webscout/Provider/TTS/voicepod.py +116 -116
  87. webscout/Provider/TextPollinationsAI.py +2 -3
  88. webscout/Provider/WiseCat.py +193 -0
  89. webscout/Provider/__init__.py +144 -134
  90. webscout/Provider/cerebras.py +242 -227
  91. webscout/Provider/chatglm.py +204 -204
  92. webscout/Provider/dgaf.py +2 -3
  93. webscout/Provider/gaurish.py +2 -3
  94. webscout/Provider/geminiapi.py +208 -208
  95. webscout/Provider/granite.py +223 -0
  96. webscout/Provider/hermes.py +218 -218
  97. webscout/Provider/llama3mitril.py +179 -179
  98. webscout/Provider/llamatutor.py +3 -3
  99. webscout/Provider/llmchat.py +2 -3
  100. webscout/Provider/meta.py +794 -794
  101. webscout/Provider/multichat.py +331 -331
  102. webscout/Provider/typegpt.py +359 -359
  103. webscout/Provider/yep.py +2 -2
  104. webscout/__main__.py +5 -5
  105. webscout/cli.py +319 -319
  106. webscout/conversation.py +241 -242
  107. webscout/exceptions.py +328 -328
  108. webscout/litagent/__init__.py +28 -28
  109. webscout/litagent/agent.py +2 -3
  110. webscout/litprinter/__init__.py +0 -58
  111. webscout/scout/__init__.py +8 -8
  112. webscout/scout/core.py +884 -884
  113. webscout/scout/element.py +459 -459
  114. webscout/scout/parsers/__init__.py +69 -69
  115. webscout/scout/parsers/html5lib_parser.py +172 -172
  116. webscout/scout/parsers/html_parser.py +236 -236
  117. webscout/scout/parsers/lxml_parser.py +178 -178
  118. webscout/scout/utils.py +38 -38
  119. webscout/swiftcli/__init__.py +811 -811
  120. webscout/update_checker.py +2 -12
  121. webscout/version.py +1 -1
  122. webscout/webscout_search.py +5 -4
  123. webscout/zeroart/__init__.py +54 -54
  124. webscout/zeroart/base.py +60 -60
  125. webscout/zeroart/effects.py +99 -99
  126. webscout/zeroart/fonts.py +816 -816
  127. {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
  128. webscout-7.2.dist-info/RECORD +217 -0
  129. webstoken/__init__.py +30 -30
  130. webstoken/classifier.py +189 -189
  131. webstoken/keywords.py +216 -216
  132. webstoken/language.py +128 -128
  133. webstoken/ner.py +164 -164
  134. webstoken/normalizer.py +35 -35
  135. webstoken/processor.py +77 -77
  136. webstoken/sentiment.py +206 -206
  137. webstoken/stemmer.py +73 -73
  138. webstoken/tagger.py +60 -60
  139. webstoken/tokenizer.py +158 -158
  140. webscout-7.1.dist-info/RECORD +0 -198
  141. {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
  142. {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
  143. {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
  144. {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Provider/geminiapi.py
@@ -1,208 +1,208 @@
- """
- Install the Google AI Python SDK
-
- $ pip install google-generativeai
- """
-
- import os
- import google.generativeai as genai
-
- from google.generativeai.types import HarmCategory, HarmBlockThreshold
- import requests
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts
- from webscout.AIbase import Provider
-
-
- class GEMINIAPI(Provider):
-     """
-     A class to interact with the Gemini API using the google-generativeai library.
-     """
-
-     def __init__(
-         self,
-         api_key,
-         model_name: str = "gemini-1.5-flash-latest",
-         temperature: float = 1,
-         top_p: float = 0.95,
-         top_k: int = 64,
-         max_output_tokens: int = 8192,
-         is_conversation: bool = True,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         system_instruction: str = "You are a helpful and informative AI assistant.",
-         safety_settings: dict = None,
-     ):
-         """
-         Initializes the Gemini API with the given parameters.
-
-         Args:
-             api_key (str, optional): Your Gemini API key. If None, it will use the environment variable "GEMINI_API_KEY".
-                 Defaults to None.
-             model_name (str, optional): The name of the Gemini model to use.
-                 Defaults to "gemini-1.5-flash-exp-0827".
-             temperature (float, optional): The temperature parameter for the model. Defaults to 1.
-             top_p (float, optional): The top_p parameter for the model. Defaults to 0.95.
-             top_k (int, optional): The top_k parameter for the model. Defaults to 64.
-             max_output_tokens (int, optional): The maximum number of output tokens. Defaults to 8192.
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             system_instruction (str, optional): System instruction to guide the AI's behavior.
-                 Defaults to "You are a helpful and informative AI assistant.".
-         """
-         self.api_key = api_key
-         self.model_name = model_name
-         self.temperature = temperature
-         self.top_p = top_p
-         self.top_k = top_k
-         self.max_output_tokens = max_output_tokens
-         self.system_instruction = system_instruction
-         self.safety_settings = safety_settings if safety_settings else {}
-         self.session = requests.Session() # Not directly used for Gemini API calls, but can be used for other requests
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_output_tokens
-         self.timeout = timeout
-         self.last_response = {}
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies
-
-         # Configure the Gemini API
-         genai.configure(api_key=self.api_key)
-
-         # Create the model with generation config
-         self.generation_config = {
-             "temperature": self.temperature,
-             "top_p": self.top_p,
-             "top_k": self.top_k,
-             "max_output_tokens": self.max_output_tokens,
-             "response_mime_type": "text/plain",
-         }
-
-         self.model = genai.GenerativeModel(
-             model_name=self.model_name,
-             generation_config=self.generation_config,
-             safety_settings=self.safety_settings,
-             system_instruction=self.system_instruction,
-         )
-
-         # Start the chat session
-         self.chat_session = self.model.start_chat()
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Not used for Gemini API. Defaults to False.
-             raw (bool, optional): Not used for Gemini API. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         # Send the message to the chat session and get the response
-         response = self.chat_session.send_message(conversation_prompt)
-         self.last_response.update(dict(text=response.text))
-         self.conversation.update_chat_history(
-             prompt, self.get_message(self.last_response)
-         )
-         return self.last_response
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False, # Streaming not supported by the current google-generativeai library
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Not used for Gemini API. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-         return self.get_message(
-             self.ask(
-                 prompt,
-                 optimizer=optimizer,
-                 conversationally=conversationally,
-             )
-         )
-
-     def get_message(self, response: dict) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict): Response generated by `self.ask`
-
-         Returns:
-             str: Message extracted
-         """
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
- if __name__ == "__main__":
-     safety_settings = {
-         HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
-         HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
-         HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
-         HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
-     }
-     ai = GEMINIAPI(api_key="" , safety_settings=safety_settings)
-     res = ai.chat(input(">>> "))
-     for r in res:
-         print(r, end="", flush=True)
+ """
+ Install the Google AI Python SDK
+
+ $ pip install google-generativeai
+ """
+
+ import os
+ import google.generativeai as genai
+
+ from google.generativeai.types import HarmCategory, HarmBlockThreshold
+ import requests
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+
+
+ class GEMINIAPI(Provider):
+     """
+     A class to interact with the Gemini API using the google-generativeai library.
+     """
+
+     def __init__(
+         self,
+         api_key,
+         model_name: str = "gemini-1.5-flash-latest",
+         temperature: float = 1,
+         top_p: float = 0.95,
+         top_k: int = 64,
+         max_output_tokens: int = 8192,
+         is_conversation: bool = True,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         system_instruction: str = "You are a helpful and informative AI assistant.",
+         safety_settings: dict = None,
+     ):
+         """
+         Initializes the Gemini API with the given parameters.
+
+         Args:
+             api_key (str, optional): Your Gemini API key. If None, it will use the environment variable "GEMINI_API_KEY".
+                 Defaults to None.
+             model_name (str, optional): The name of the Gemini model to use.
+                 Defaults to "gemini-1.5-flash-exp-0827".
+             temperature (float, optional): The temperature parameter for the model. Defaults to 1.
+             top_p (float, optional): The top_p parameter for the model. Defaults to 0.95.
+             top_k (int, optional): The top_k parameter for the model. Defaults to 64.
+             max_output_tokens (int, optional): The maximum number of output tokens. Defaults to 8192.
+             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+             timeout (int, optional): Http request timeout. Defaults to 30.
+             intro (str, optional): Conversation introductory prompt. Defaults to None.
+             filepath (str, optional): Path to file containing conversation history. Defaults to None.
+             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+             proxies (dict, optional): Http request proxies. Defaults to {}.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+             system_instruction (str, optional): System instruction to guide the AI's behavior.
+                 Defaults to "You are a helpful and informative AI assistant.".
+         """
+         self.api_key = api_key
+         self.model_name = model_name
+         self.temperature = temperature
+         self.top_p = top_p
+         self.top_k = top_k
+         self.max_output_tokens = max_output_tokens
+         self.system_instruction = system_instruction
+         self.safety_settings = safety_settings if safety_settings else {}
+         self.session = requests.Session() # Not directly used for Gemini API calls, but can be used for other requests
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_output_tokens
+         self.timeout = timeout
+         self.last_response = {}
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+         # Configure the Gemini API
+         genai.configure(api_key=self.api_key)
+
+         # Create the model with generation config
+         self.generation_config = {
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+             "top_k": self.top_k,
+             "max_output_tokens": self.max_output_tokens,
+             "response_mime_type": "text/plain",
+         }
+
+         self.model = genai.GenerativeModel(
+             model_name=self.model_name,
+             generation_config=self.generation_config,
+             safety_settings=self.safety_settings,
+             system_instruction=self.system_instruction,
+         )
+
+         # Start the chat session
+         self.chat_session = self.model.start_chat()
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> dict:
+         """Chat with AI
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Not used for Gemini API. Defaults to False.
+             raw (bool, optional): Not used for Gemini API. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Send the message to the chat session and get the response
+         response = self.chat_session.send_message(conversation_prompt)
+         self.last_response.update(dict(text=response.text))
+         self.conversation.update_chat_history(
+             prompt, self.get_message(self.last_response)
+         )
+         return self.last_response
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False, # Streaming not supported by the current google-generativeai library
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`
+
+         Args:
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Not used for Gemini API. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             str: Response generated
+         """
+         return self.get_message(
+             self.ask(
+                 prompt,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+             )
+         )
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response
+
+         Args:
+             response (dict): Response generated by `self.ask`
+
+         Returns:
+             str: Message extracted
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+ if __name__ == "__main__":
+     safety_settings = {
+         HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+         HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+         HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+         HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+     }
+     ai = GEMINIAPI(api_key="" , safety_settings=safety_settings)
+     res = ai.chat(input(">>> "))
+     for r in res:
+         print(r, end="", flush=True)
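
For orientation (not part of the diff), a minimal usage sketch for the provider above. The import path is assumed from the file's location in the wheel, and `GEMINI_API_KEY` is a hypothetical environment variable; note that `chat()` returns a complete string (this provider does not stream), so the character-by-character loop in the module's `__main__` block is not needed.

```python
import os

# Hypothetical usage; the import path is assumed from the file's location
# in the wheel (webscout/Provider/geminiapi.py).
from webscout.Provider.geminiapi import GEMINIAPI

# chat() returns the full response as a str, so it can be printed directly.
ai = GEMINIAPI(api_key=os.environ.get("GEMINI_API_KEY", ""))
print(ai.chat("Hello! What can you do?"))
```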
webscout/Provider/granite.py (new file)
@@ -0,0 +1,223 @@
+ import requests
+ import json
+ from typing import Any, Dict, Generator
+
+ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.Litlogger import Logger, LogFormat
+ from webscout import LitAgent as Lit
+ class IBMGranite(Provider):
+     """
+     A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
+     with comprehensive logging and using Lit agent for the user agent.
+     """
+
+     AVAILABLE_MODELS = ["granite-3-8b-instruct"]
+
+     def __init__(
+         self,
+         api_key: str,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "granite-3-8b-instruct",
+         system_prompt: str = "You are a helpful AI assistant.",
+         logging: bool = False
+     ):
+         """Initializes the IBM Granite API client with logging and Lit agent for the user agent."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         # Setup logging if enabled
+         self.logger = Logger(
+             name="IBMGranite",
+             format=LogFormat.MODERN_EMOJI,
+
+         ) if logging else None
+
+         if self.logger:
+             self.logger.info(f"Initializing IBMGranite with model: {model}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://d18n68ssusgr7r.cloudfront.net/v1/chat/completions"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+
+         # Use Lit agent to generate a random User-Agent
+         self.headers = {
+             "authority": "d18n68ssusgr7r.cloudfront.net",
+             "accept": "application/json,application/jsonl",
+             "content-type": "application/json",
+             "origin": "https://www.ibm.com",
+             "referer": "https://www.ibm.com/",
+             "user-agent": Lit().random(),
+         }
+         self.headers["Authorization"] = f"Bearer {api_key}"
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+
+         self.__available_optimizers = (
+             method for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+         """Chat with AI
+         Args:
+             prompt (str): Prompt to be sent.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+         Returns:
+             Union[Dict, Generator[Dict, None, None]]: Response generated
+         """
+         if self.logger:
+             self.logger.debug(f"Ask method initiated - Prompt (first 50 chars): {prompt[:50]}")
+
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+                 if self.logger:
+                     self.logger.debug(f"Applied optimizer: {optimizer}")
+             else:
+                 if self.logger:
+                     self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         payload = {
+             "model": self.model,
+             "messages": [
+                 {"role": "system", "content": self.system_prompt},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "stream": stream
+         }
+
+         def for_stream():
+             try:
+                 if self.logger:
+                     self.logger.debug(f"Sending POST request to {self.api_endpoint} with payload: {payload}")
+                 response = self.session.post(
+                     self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+                 )
+                 if not response.ok:
+                     msg = f"Request failed with status code {response.status_code}: {response.text}"
+                     if self.logger:
+                         self.logger.error(msg)
+                     raise exceptions.FailedToGenerateResponseError(msg)
+
+                 streaming_text = ""
+                 for line in response.iter_lines(decode_unicode=True):
+                     if line:
+                         try:
+                             data = json.loads(line)
+                             if len(data) == 2 and data[0] == 3 and isinstance(data[1], str):
+                                 content = data[1]
+                                 streaming_text += content
+                                 yield content if raw else dict(text=content)
+                             else:
+                                 if self.logger:
+                                     self.logger.debug(f"Skipping unrecognized line: {line}")
+                         except json.JSONDecodeError as e:
+                             if self.logger:
+                                 self.logger.error(f"JSON decode error: {e}")
+                             continue
+                 self.last_response.update(dict(text=streaming_text))
+                 self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+                 if self.logger:
+                     self.logger.info("Stream processing completed.")
+
+             except requests.exceptions.RequestException as e:
+                 if self.logger:
+                     self.logger.error(f"Request exception: {e}")
+                 raise exceptions.ProviderConnectionError(f"Request failed: {e}")
+             except json.JSONDecodeError as e:
+                 if self.logger:
+                     self.logger.error(f"Invalid JSON received: {e}")
+                 raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}")
+             except Exception as e:
+                 if self.logger:
+                     self.logger.error(f"Unexpected error: {e}")
+                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}")
+
+         def for_non_stream():
+             # Run the generator to completion
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str | Generator[str, None, None]:
+         """Generate response as a string using chat method"""
+         if self.logger:
+             self.logger.debug(f"Chat method initiated - Prompt (first 50 chars): {prompt[:50]}")
+
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             result = self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+             if self.logger:
+                 self.logger.info("Chat method completed.")
+             return result
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+     # Example usage: Initialize with logging enabled.
+     ai = IBMGranite(
+         api_key="", # press f12 to see the API key
+         logging=True
+     )
+     response = ai.chat("write a poem about AI", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
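
A note on the stream parser in the new file: it reads the response line by line, parses each line as JSON, and keeps only frames shaped `[3, "<text>"]` (length 2, first element `3`, second element a string), skipping everything else. A minimal, self-contained sketch of that decode rule, using made-up frames to illustrate the assumed wire format (the added `isinstance(data, list)` guard is an extra safety check not present in granite.py):

```python
import json

# Simulated JSONL stream; only [3, "<text>"] frames carry content under the
# parser's rule in granite.py. Other frames and malformed lines are skipped.
frames = ['[3, "Hello"]', '{"event": "meta"}', '[3, ", world"]', '[0, "ignored"]']

for line in frames:
    try:
        data = json.loads(line)
    except json.JSONDecodeError:
        continue  # malformed lines are skipped, as in granite.py
    if isinstance(data, list) and len(data) == 2 and data[0] == 3 and isinstance(data[1], str):
        print(data[1], end="")
print()  # prints: Hello, world
```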