webscout 8.3.6-py3-none-any.whl → 8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -1,303 +1,313 @@
- import time
- import uuid
- import urllib.parse
- from curl_cffi.requests import Session, RequestsError
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, count_tokens
- )
-
- # --- AI4Chat Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'AI4Chat'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Use the format_prompt utility to format the conversation
-         from .utils import format_prompt
-
-         # Format the messages into a single string
-         conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
-
-         # Set up request parameters
-         country_param = kwargs.get("country", self._client.country)
-         user_id_param = kwargs.get("user_id", self._client.user_id)
-
-         # Generate request ID and timestamp
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         # AI4Chat doesn't support streaming, so we'll simulate it if requested
-         if stream:
-             return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
-         else:
-             return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str,
-         conversation_prompt: str, country: str, user_id: str,
-         timeout: Optional[int] = None, proxies: Optional[dict] = None
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         """Simulate streaming by breaking up the full response into fixed-size character chunks."""
-         try:
-             # Get the full response first
-             full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
-
-             # Track token usage
-             prompt_tokens = count_tokens(conversation_prompt)
-             completion_tokens = 0
-
-             # Stream fixed-size character chunks (e.g., 48 chars)
-             buffer = full_response
-             chunk_size = 48
-             while buffer:
-                 chunk_text = buffer[:chunk_size]
-                 buffer = buffer[chunk_size:]
-                 completion_tokens += count_tokens(chunk_text)
-
-                 if chunk_text.strip():
-                     # Create the delta object
-                     delta = ChoiceDelta(
-                         content=chunk_text,
-                         role="assistant",
-                         tool_calls=None
-                     )
-
-                     # Create the choice object
-                     choice = Choice(
-                         index=0,
-                         delta=delta,
-                         finish_reason=None,
-                         logprobs=None
-                     )
-
-                     # Create the chunk object
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model,
-                         system_fingerprint=None
-                     )
-
-                     yield chunk
-
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(
-                 content=None,
-                 role=None,
-                 tool_calls=None
-             )
-
-             choice = Choice(
-                 index=0,
-                 delta=delta,
-                 finish_reason="stop",
-                 logprobs=None
-             )
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-
-             yield chunk
-
-         except RequestsError as e:
-             print(f"Error during AI4Chat stream request: {e}")
-             raise IOError(f"AI4Chat request failed: {e}") from e
-         except Exception as e:
-             print(f"Unexpected error during AI4Chat stream request: {e}")
-             raise IOError(f"AI4Chat request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str,
-         conversation_prompt: str, country: str, user_id: str,
-         timeout: Optional[int] = None, proxies: Optional[dict] = None
-     ) -> ChatCompletion:
-         """Get a complete response from AI4Chat."""
-         try:
-             # Get the full response
-             full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
-
-             # Estimate token counts
-             prompt_tokens = count_tokens(conversation_prompt)
-             completion_tokens = count_tokens(full_response)
-             total_tokens = prompt_tokens + completion_tokens
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_response
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the usage object
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except RequestsError as e:
-             print(f"Error during AI4Chat non-stream request: {e}")
-             raise IOError(f"AI4Chat request failed: {e}") from e
-         except Exception as e:
-             print(f"Unexpected error during AI4Chat non-stream request: {e}")
-             raise IOError(f"AI4Chat request failed: {e}") from e
-
-     def _get_ai4chat_response(self, prompt: str, country: str, user_id: str,
-                               timeout: Optional[int] = None, proxies: Optional[dict] = None) -> str:
-         """Make the actual API request to AI4Chat."""
-         timeout_val = timeout if timeout is not None else self._client.timeout
-         original_proxies = self._client.session.proxies
-         if proxies is not None:
-             self._client.session.proxies = proxies
-
-         try:
-             # URL encode parameters
-             encoded_text = urllib.parse.quote(prompt)
-             encoded_country = urllib.parse.quote(country)
-             encoded_user_id = urllib.parse.quote(user_id)
-
-             # Construct the API URL
-             url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
-
-             # Make the request
-             response = self._client.session.get(url, headers=self._client.headers, timeout=timeout_val)
-             response.raise_for_status()
-         except RequestsError as e:
-             raise IOError(f"Failed to generate response: {e}")
-         finally:
-             if proxies is not None:
-                 self._client.session.proxies = original_proxies
-
-         # Process the response text
-         response_text = response.text
-
-         # Remove surrounding quotes if present
-         if response_text.startswith('"'):
-             response_text = response_text[1:]
-         if response_text.endswith('"'):
-             response_text = response_text[:-1]
-
-         # Replace escaped newlines
-         response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
-
-         return response_text
-
- class Chat(BaseChat):
-     def __init__(self, client: 'AI4Chat'):
-         self.completions = Completions(client)
-
- class AI4Chat(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for AI4Chat API.
-
-     Usage:
-         client = AI4Chat()
-         response = client.chat.completions.create(
-             model="default",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-
-     AVAILABLE_MODELS = ["default"]
-
-     def __init__(
-         self,
-         system_prompt: str = "You are a helpful and informative AI assistant.",
-         country: str = "Asia",
-         user_id: str = "usersmjb2oaz7y"
-     ):
-         """
-         Initialize the AI4Chat client.
-
-         Args:
-             system_prompt: System prompt to guide the AI's behavior
-             country: Country parameter for API
-             user_id: User ID for API
-         """
-         self.timeout = 30
-         self.system_prompt = system_prompt
-         self.country = country
-         self.user_id = user_id
-
-         # API endpoint
-         self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
-
-         # Initialize session
-         self.session = Session()
-         self.session.proxies = {}
-         # self.session.timeout = self.timeout # Timeout is per-request for curl_cffi
-
-         # Set headers
-         self.headers = {
-             "Accept": "*/*",
-             "Accept-Language": "id-ID,id;q=0.9",
-             "Origin": "https://www.ai4chat.co",
-             "Priority": "u=1, i",
-             "Referer": "https://www.ai4chat.co/",
-             "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
-             "Sec-CH-UA-Mobile": "?1",
-             "Sec-CH-UA-Platform": '"Android"',
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "cross-site",
-             "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
-         }
-
-         # Update session headers
-         self.session.headers.update(self.headers)
-
-         # Initialize chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
+ import time
+ import uuid
+ import urllib.parse
+ from curl_cffi.requests import Session, RequestsError
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ # --- AI4Chat Client ---
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'AI4Chat'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[dict] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Use the format_prompt utility to format the conversation
+         from webscout.Provider.OPENAI.utils import format_prompt
+
+         # Format the messages into a single string
+         conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
+
+         # Set up request parameters
+         country_param = kwargs.get("country", self._client.country)
+         user_id_param = kwargs.get("user_id", self._client.user_id)
+
+         # Generate request ID and timestamp
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         # AI4Chat doesn't support streaming, so we'll simulate it if requested
+         if stream:
+             return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str,
+         conversation_prompt: str, country: str, user_id: str,
+         timeout: Optional[int] = None, proxies: Optional[dict] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Simulate streaming by breaking up the full response into fixed-size character chunks."""
+         try:
+             # Get the full response first
+             full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
+
+             # Track token usage
+             prompt_tokens = count_tokens(conversation_prompt)
+             completion_tokens = 0
+
+             # Stream fixed-size character chunks (e.g., 48 chars)
+             buffer = full_response
+             chunk_size = 48
+             while buffer:
+                 chunk_text = buffer[:chunk_size]
+                 buffer = buffer[chunk_size:]
+                 completion_tokens += count_tokens(chunk_text)
+
+                 if chunk_text.strip():
+                     # Create the delta object
+                     delta = ChoiceDelta(
+                         content=chunk_text,
+                         role="assistant",
+                         tool_calls=None
+                     )
+
+                     # Create the choice object
+                     choice = Choice(
+                         index=0,
+                         delta=delta,
+                         finish_reason=None,
+                         logprobs=None
+                     )
+
+                     # Create the chunk object
+                     chunk = ChatCompletionChunk(
+                         id=request_id,
+                         choices=[choice],
+                         created=created_time,
+                         model=model,
+                         system_fingerprint=None
+                     )
+
+                     yield chunk
+
+             # Final chunk with finish_reason="stop"
+             delta = ChoiceDelta(
+                 content=None,
+                 role=None,
+                 tool_calls=None
+             )
+
+             choice = Choice(
+                 index=0,
+                 delta=delta,
+                 finish_reason="stop",
+                 logprobs=None
+             )
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 system_fingerprint=None
+             )
+
+             yield chunk
+
+         except RequestsError as e:
+             print(f"Error during AI4Chat stream request: {e}")
+             raise IOError(f"AI4Chat request failed: {e}") from e
+         except Exception as e:
+             print(f"Unexpected error during AI4Chat stream request: {e}")
+             raise IOError(f"AI4Chat request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str,
+         conversation_prompt: str, country: str, user_id: str,
+         timeout: Optional[int] = None, proxies: Optional[dict] = None
+     ) -> ChatCompletion:
+         """Get a complete response from AI4Chat."""
+         try:
+             # Get the full response
+             full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
+
+             # Estimate token counts
+             prompt_tokens = count_tokens(conversation_prompt)
+             completion_tokens = count_tokens(full_response)
+             total_tokens = prompt_tokens + completion_tokens
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_response
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the usage object
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except RequestsError as e:
+             print(f"Error during AI4Chat non-stream request: {e}")
+             raise IOError(f"AI4Chat request failed: {e}") from e
+         except Exception as e:
+             print(f"Unexpected error during AI4Chat non-stream request: {e}")
+             raise IOError(f"AI4Chat request failed: {e}") from e
+
+     def _get_ai4chat_response(self, prompt: str, country: str, user_id: str,
+                               timeout: Optional[int] = None, proxies: Optional[dict] = None) -> str:
+         """Make the actual API request to AI4Chat."""
+         timeout_val = timeout if timeout is not None else self._client.timeout
+         original_proxies = self._client.session.proxies
+         if proxies is not None:
+             self._client.session.proxies = proxies
+
+         try:
+             # URL encode parameters
+             encoded_text = urllib.parse.quote(prompt)
+             encoded_country = urllib.parse.quote(country)
+             encoded_user_id = urllib.parse.quote(user_id)
+
+             # Construct the API URL
+             url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
+
+             # Make the request
+             response = self._client.session.get(url, headers=self._client.headers, timeout=timeout_val)
+             response.raise_for_status()
+         except RequestsError as e:
+             raise IOError(f"Failed to generate response: {e}")
+         finally:
+             if proxies is not None:
+                 self._client.session.proxies = original_proxies
+
+         # Process the response text
+         response_text = response.text
+
+         # Remove surrounding quotes if present
+         if response_text.startswith('"'):
+             response_text = response_text[1:]
+         if response_text.endswith('"'):
+             response_text = response_text[:-1]
+
+         # Replace escaped newlines
+         response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+         return response_text
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'AI4Chat'):
+         self.completions = Completions(client)
+
+ class AI4Chat(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for AI4Chat API.
+
+     Usage:
+         client = AI4Chat()
+         response = client.chat.completions.create(
+             model="default",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = ["default"]
+
+     def __init__(
+         self,
+         system_prompt: str = "You are a helpful and informative AI assistant.",
+         country: str = "Asia",
+         user_id: str = "usersmjb2oaz7y",
+         proxies: Optional[Dict[str, str]] = None
+     ):
+         """
+         Initialize the AI4Chat client.
+
+         Args:
+             system_prompt: System prompt to guide the AI's behavior
+             country: Country parameter for API
+             user_id: User ID for API
+             proxies: Proxy configuration
+         """
+         super().__init__(proxies=proxies)
+         self.timeout = 30
+         self.system_prompt = system_prompt
+         self.country = country
+         self.user_id = user_id
+
+         # API endpoint
+         self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
+
+         # Set headers
+         self.headers = {
+             "Accept": "*/*",
+             "Accept-Language": "id-ID,id;q=0.9",
+             "Origin": "https://www.ai4chat.co",
+             "Priority": "u=1, i",
+             "Referer": "https://www.ai4chat.co/",
+             "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
+             "Sec-CH-UA-Mobile": "?1",
+             "Sec-CH-UA-Platform": '"Android"',
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "cross-site",
+             "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
+         }
+
+         # Update session headers
+         self.session.headers.update(self.headers)
+
+         # Initialize chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+ if __name__ == "__main__":
+     # Example usage
+     client = AI4Chat()
+     response = client.chat.completions.create(
+         model="default",
+         messages=[
+             {"role": "system", "content": client.system_prompt},
+             {"role": "user", "content": "Hello, how are you?"}
+         ]
+     )
+     print(response.choices[0].message.content)
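
A minimal sketch of consuming the simulated streaming this file implements, assuming the webscout 8.3.7 package layout shown in the diff. Note the chunks are the provider's fixed 48-character slices of one already-complete HTTP response, not true server-sent events:

from webscout.Provider.OPENAI.ai4chat import AI4Chat

client = AI4Chat()
# stream=True yields ChatCompletionChunk objects cut from a single response
for chunk in client.chat.completions.create(
    model="default",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    # the final chunk carries finish_reason="stop" and no content
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)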