webscout-8.2.9-py3-none-any.whl → webscout-8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.
Files changed (63):
  1. webscout/AIauto.py +2 -2
  2. webscout/Provider/Blackboxai.py +2 -0
  3. webscout/Provider/ChatSandbox.py +2 -1
  4. webscout/Provider/Deepinfra.py +1 -1
  5. webscout/Provider/HeckAI.py +1 -1
  6. webscout/Provider/LambdaChat.py +1 -0
  7. webscout/Provider/MCPCore.py +7 -3
  8. webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
  9. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  10. webscout/Provider/OPENAI/FalconH1.py +457 -0
  11. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  12. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  13. webscout/Provider/OPENAI/PI.py +427 -0
  14. webscout/Provider/OPENAI/Qwen3.py +303 -282
  15. webscout/Provider/OPENAI/TwoAI.py +29 -12
  16. webscout/Provider/OPENAI/__init__.py +3 -1
  17. webscout/Provider/OPENAI/ai4chat.py +33 -23
  18. webscout/Provider/OPENAI/api.py +78 -12
  19. webscout/Provider/OPENAI/base.py +2 -0
  20. webscout/Provider/OPENAI/c4ai.py +31 -10
  21. webscout/Provider/OPENAI/chatgpt.py +41 -22
  22. webscout/Provider/OPENAI/chatgptclone.py +32 -13
  23. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  24. webscout/Provider/OPENAI/copilot.py +26 -10
  25. webscout/Provider/OPENAI/deepinfra.py +327 -321
  26. webscout/Provider/OPENAI/e2b.py +77 -99
  27. webscout/Provider/OPENAI/exaai.py +13 -10
  28. webscout/Provider/OPENAI/exachat.py +10 -6
  29. webscout/Provider/OPENAI/flowith.py +7 -3
  30. webscout/Provider/OPENAI/freeaichat.py +10 -6
  31. webscout/Provider/OPENAI/glider.py +10 -6
  32. webscout/Provider/OPENAI/heckai.py +11 -8
  33. webscout/Provider/OPENAI/llmchatco.py +9 -7
  34. webscout/Provider/OPENAI/mcpcore.py +10 -7
  35. webscout/Provider/OPENAI/multichat.py +3 -1
  36. webscout/Provider/OPENAI/netwrck.py +10 -6
  37. webscout/Provider/OPENAI/oivscode.py +12 -9
  38. webscout/Provider/OPENAI/opkfc.py +14 -3
  39. webscout/Provider/OPENAI/scirachat.py +14 -8
  40. webscout/Provider/OPENAI/sonus.py +10 -6
  41. webscout/Provider/OPENAI/standardinput.py +18 -9
  42. webscout/Provider/OPENAI/textpollinations.py +14 -7
  43. webscout/Provider/OPENAI/toolbaz.py +16 -10
  44. webscout/Provider/OPENAI/typefully.py +14 -7
  45. webscout/Provider/OPENAI/typegpt.py +10 -6
  46. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  47. webscout/Provider/OPENAI/venice.py +10 -6
  48. webscout/Provider/OPENAI/writecream.py +166 -163
  49. webscout/Provider/OPENAI/x0gpt.py +367 -365
  50. webscout/Provider/OPENAI/yep.py +384 -382
  51. webscout/Provider/PI.py +2 -1
  52. webscout/Provider/__init__.py +0 -2
  53. webscout/Provider/granite.py +41 -6
  54. webscout/Provider/oivscode.py +37 -37
  55. webscout/Provider/scnet.py +1 -0
  56. webscout/version.py +1 -1
  57. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
  58. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
  59. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  60. webscout/Provider/ChatGPTGratis.py +0 -194
  61. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/FreeGemini.py

@@ -44,6 +44,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -69,20 +71,27 @@ class Completions(BaseCompletions):
         }

         if stream:
-            return self._create_stream(request_id, created_time, model, api_payload)
+            return self._create_stream(request_id, created_time, model, api_payload, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, api_payload)
+            return self._create_non_stream(request_id, created_time, model, api_payload, timeout=timeout, proxies=proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            # Ensure session proxies are reset if no specific proxies are passed for this call
+            self._client.session.proxies = {}
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
-                impersonate="chrome120"
+                timeout=timeout if timeout is not None else self._client.timeout,
+                impersonate="chrome120"
             )
             response.raise_for_status()

@@ -127,18 +136,26 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"{RED}Error during FreeGemini stream request: {e}{RESET}")
             raise IOError(f"FreeGemini stream request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
             # For non-streaming, we'll still use streaming since the API returns data in chunks
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,  # API always returns streaming format
-                timeout=self._client.timeout,
-                impersonate="chrome120"
+                timeout=timeout if timeout is not None else self._client.timeout,
+                impersonate="chrome120"
             )
             response.raise_for_status()

@@ -197,6 +214,8 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"{RED}Error during FreeGemini non-stream request: {e}{RESET}")
             raise IOError(f"FreeGemini request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies

     @staticmethod
     def _gemini_extractor(data: Dict) -> Optional[str]:
@@ -234,20 +253,17 @@ class FreeGemini(OpenAICompatibleProvider):

     def __init__(
         self,
-        timeout: int = 30,
     ):
         """
         Initialize the FreeGemini client.
-
-        Args:
-            timeout: Request timeout in seconds
         """
-        self.timeout = timeout
+        self.timeout = 30
         # Update the API endpoint to match the working implementation
         self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"

         # Initialize session with curl_cffi for better Cloudflare handling
         self.session = Session()
+        self.session.proxies = {}

         # Use LitAgent for fingerprinting
         self.agent = LitAgent()
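
Taken together, the FreeGemini hunks drop the constructor's timeout argument and instead accept timeout and proxies per call, applying them to the session for the duration of one request and restoring the previous proxies in a finally block. A minimal usage sketch of the new surface (the import path is inferred from the file list, and the override values are placeholders, not part of the release):

# Sketch only: exercises the per-request overrides added in 8.3.
from webscout.Provider.OPENAI.FreeGemini import FreeGemini

client = FreeGemini()  # timeout is now fixed at 30s internally

response = client.chat.completions.create(
    model="gemini-2.0-flash",                    # the model baked into the endpoint URL
    messages=[{"role": "user", "content": "Hi"}],
    timeout=60,                                  # overrides client.timeout for this call only
    proxies={"https": "http://127.0.0.1:8080"},  # placeholder proxy, restored afterwards
)
print(response.choices[0].message.content)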
webscout/Provider/OPENAI/NEMOTRON.py

@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> ChatCompletion:
         nemotron_model_name = self._client.convert_model_name(model)
@@ -48,13 +50,14 @@
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
         # Always use non-stream mode, ignore 'stream' argument
-        return self._create_non_stream(request_id, created_time, model, payload)
+        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
-            response_generator = self._client._internal_make_request(payload, stream=True)
+            response_generator = self._client._internal_make_request(payload, stream=True, request_timeout=timeout, request_proxies=proxies)
             for text_chunk in response_generator:
                 if text_chunk:
                     delta = ChoiceDelta(content=text_chunk, role="assistant")
@@ -79,11 +82,12 @@
             raise IOError(f"NEMOTRON request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
         full_response_content = ""
         try:
-            response_generator = self._client._internal_make_request(payload, stream=False)
+            response_generator = self._client._internal_make_request(payload, stream=False, request_timeout=timeout, request_proxies=proxies)
             full_response_content = next(response_generator, "")
         except Exception as e:
             pass
@@ -117,12 +121,11 @@ class NEMOTRON(OpenAICompatibleProvider):

     API_BASE_URL = "https://nemotron.one/api/chat"
     def __init__(
-        self,
-        timeout: int = 30,
-        proxies: dict = {}
+        self
     ):
         self.session = requests.Session()
-        self.timeout = timeout
+        self.timeout = 30
+        self.session.proxies = {}
         agent = LitAgent()
         user_agent = agent.random()
         self.base_headers = {
@@ -137,8 +140,6 @@ class NEMOTRON(OpenAICompatibleProvider):
             "user-agent": user_agent
         }
         self.session.headers.update(self.base_headers)
-        if proxies:
-            self.session.proxies.update(proxies)
         self.chat = Chat(self)

     def _generate_random_email(self) -> str:
@@ -193,10 +194,19 @@ class NEMOTRON(OpenAICompatibleProvider):
     def _internal_make_request(
         self,
         payload: Dict[str, Any],
-        stream: bool = False
+        stream: bool = False,
+        request_timeout: Optional[int] = None,
+        request_proxies: Optional[dict] = None
     ) -> Generator[str, None, None]:
         request_headers = self.base_headers.copy()
         request_headers["referer"] = f"https://nemotron.one/chat/{payload['model']}"
+        original_proxies = self.session.proxies.copy()
+        if request_proxies is not None:
+            self.session.proxies.update(request_proxies)
+        elif not self.session.proxies:
+            pass
+        else:
+            self.session.proxies = {}
         try:
             if stream:
                 with self.session.post(
@@ -204,7 +214,7 @@ class NEMOTRON(OpenAICompatibleProvider):
                     headers=request_headers,
                     json=payload,
                     stream=True,
-                    timeout=self.timeout
+                    timeout=request_timeout if request_timeout is not None else self.timeout
                 ) as response:
                     response.raise_for_status()
                     yield from sanitize_stream(
@@ -216,7 +226,7 @@
                     self.API_BASE_URL,
                     headers=request_headers,
                     json=payload,
-                    timeout=self.timeout
+                    timeout=request_timeout if request_timeout is not None else self.timeout
                 )
                 response.raise_for_status()
                 yield response.text
@@ -224,6 +234,8 @@
             raise exceptions.ProviderConnectionError(f"NEMOTRON API Connection error: {str(e)}")
         except Exception as e:
             raise RuntimeError(f"NEMOTRON API request unexpected error: {str(e)}")
+        finally:
+            self.session.proxies = original_proxies
     @property
     def models(self):
         class _ModelList:
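
FreeGemini and NEMOTRON both implement the same save, override, restore handling of session.proxies inline with try/finally. As an illustration only (this helper is not part of webscout), the pattern those hunks implement could be captured once in a context manager:

# Hypothetical refactor of the try/finally proxy handling shown above.
from contextlib import contextmanager

@contextmanager
def temporary_proxies(session, proxies):
    original = session.proxies.copy()  # remember the session's current proxies
    try:
        # Apply the per-request override, or clear proxies when none are given,
        # mirroring what the 8.3 hunks do inline.
        session.proxies = proxies if proxies is not None else {}
        yield session
    finally:
        session.proxies = original  # always restore, even if the request raises

With such a helper, _create_stream, _create_non_stream, and _internal_make_request could wrap their POST calls in a single with block instead of repeating the restore logic.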
webscout/Provider/OPENAI/PI.py (new file)

@@ -0,0 +1,427 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import time
+import uuid
+import re
+import threading
+from typing import List, Dict, Optional, Union, Generator, Any
+from uuid import uuid4
+
+# Import base classes and utility structures
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- PI.ai Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'PiAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2048,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        voice: bool = False,
+        voice_name: str = "voice3",
+        output_file: str = "PiAI.mp3",
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create with Pi.ai specific features.
+        """
+        # Validate voice settings
+        if voice and voice_name not in self._client.AVAILABLE_VOICES:
+            raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self._client.AVAILABLE_VOICES.keys())}")
+
+        # Use format_prompt from utils.py to convert OpenAI messages format to Pi.ai prompt
+        from webscout.Provider.OPENAI.utils import format_prompt, count_tokens
+        prompt = format_prompt(messages, do_continue=True, add_special_tokens=True)
+
+        # Ensure conversation is started
+        if not self._client.conversation_id:
+            self._client.start_conversation()
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # Use count_tokens for prompt token counting
+        prompt_tokens = count_tokens(prompt)
+
+        if stream:
+            return self._create_stream(
+                request_id, created_time, model, prompt,
+                timeout, proxies, voice, voice_name, output_file, prompt_tokens
+            )
+        else:
+            return self._create_non_stream(
+                request_id, created_time, model, prompt,
+                timeout, proxies, voice, voice_name, output_file, prompt_tokens
+            )
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, prompt: str,
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None,
+        voice: bool = False, voice_name: str = "voice3", output_file: str = "PiAI.mp3",
+        prompt_tokens: Optional[int] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+
+        data = {
+            'text': prompt,
+            'conversation': self._client.conversation_id
+        }
+
+        try:
+            # Try primary URL first
+            current_url = self._client.primary_url
+            response = self._client.session.post(
+                current_url,
+                json=data,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                impersonate="chrome110"
+            )
+
+            # If primary URL fails, try fallback URL
+            if not response.ok and current_url == self._client.primary_url:
+                current_url = self._client.fallback_url
+                response = self._client.session.post(
+                    current_url,
+                    json=data,
+                    stream=True,
+                    timeout=timeout or self._client.timeout,
+                    impersonate="chrome110"
+                )
+
+            response.raise_for_status()
+
+            # Track token usage across chunks
+            # prompt_tokens = len(prompt.split()) if prompt else 0
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            sids = []
+            streaming_text = ""
+            full_raw_data_for_sids = ""
+
+            # Process streaming response
+            for line_bytes in response.iter_lines():
+                if line_bytes:
+                    line = line_bytes.decode('utf-8')
+                    full_raw_data_for_sids += line + "\n"
+
+                    if line.startswith("data: "):
+                        json_line_str = line[6:]
+                        try:
+                            chunk_data = json.loads(json_line_str)
+                            content = chunk_data.get('text', '')
+
+                            if content:
+                                # Calculate incremental content
+                                new_content = content[len(streaming_text):] if len(content) > len(streaming_text) else content
+                                streaming_text = content
+                                completion_tokens += len(new_content.split()) if new_content else 0
+                                total_tokens = prompt_tokens + completion_tokens
+
+                                # Create OpenAI-compatible chunk
+                                delta = ChoiceDelta(
+                                    content=new_content,
+                                    role="assistant"
+                                )
+
+                                choice = Choice(
+                                    index=0,
+                                    delta=delta,
+                                    finish_reason=None
+                                )
+
+                                chunk = ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[choice],
+                                    created=created_time,
+                                    model=model
+                                )
+
+                                yield chunk
+
+                        except (json.JSONDecodeError, KeyError):
+                            continue
+
+            # Send final chunk with finish_reason
+            final_choice = Choice(
+                index=0,
+                delta=ChoiceDelta(),
+                finish_reason="stop"
+            )
+
+            final_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[final_choice],
+                created=created_time,
+                model=model
+            )
+
+            yield final_chunk
+
+            # Handle voice generation
+            if voice and voice_name:
+                sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
+                second_sid = sids[1] if len(sids) >= 2 else None
+                if second_sid:
+                    threading.Thread(
+                        target=self._client.download_audio_threaded,
+                        args=(voice_name, second_sid, output_file)
+                    ).start()
+
+        except CurlError as e:
+            raise IOError(f"PI.ai request failed (CurlError): {e}") from e
+        except Exception as e:
+            raise IOError(f"PI.ai request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, prompt: str,
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None,
+        voice: bool = False, voice_name: str = "voice3", output_file: str = "PiAI.mp3",
+        prompt_tokens: Optional[int] = None
+    ) -> ChatCompletion:
+
+        # Collect streaming response into a single response
+        full_content = ""
+        completion_tokens = 0
+        # prompt_tokens = len(prompt.split()) if prompt else 0  # replaced
+
+        # Use provided prompt_tokens if available
+        if prompt_tokens is None:
+            from webscout.Provider.OPENAI.utils import count_tokens
+            prompt_tokens = count_tokens(prompt)
+
+        for chunk in self._create_stream(
+            request_id, created_time, model, prompt,
+            timeout, proxies, voice, voice_name, output_file, prompt_tokens
+        ):
+            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+                full_content += chunk.choices[0].delta.content
+                completion_tokens += len(chunk.choices[0].delta.content.split())
+
+        # Create final completion response
+        message = ChatCompletionMessage(
+            role="assistant",
+            content=full_content
+        )
+
+        choice = Choice(
+            index=0,
+            message=message,
+            finish_reason="stop"
+        )
+
+        usage = CompletionUsage(
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=prompt_tokens + completion_tokens
+        )
+
+        completion = ChatCompletion(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model=model,
+            usage=usage
+        )
+
+        return completion
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'PiAI'):
+        self.completions = Completions(client)
+
+
+class PiAI(OpenAICompatibleProvider):
+    """
+    PiAI provider following OpenAI-compatible interface.
+
+    Supports Pi.ai specific features like voice generation and conversation management.
+    """
+
+    AVAILABLE_MODELS = ["inflection_3_pi"]
+    AVAILABLE_VOICES: Dict[str, int] = {
+        "voice1": 1,
+        "voice2": 2,
+        "voice3": 3,
+        "voice4": 4,
+        "voice5": 5,
+        "voice6": 6,
+        "voice7": 7,
+        "voice8": 8
+    }
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        timeout: int = 30,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ):
+        """
+        Initialize PI.ai provider.
+
+        Args:
+            api_key: Not used for Pi.ai but kept for compatibility
+            timeout: Request timeout in seconds
+            proxies: Proxy configuration
+            **kwargs: Additional arguments
+        """
+        self.timeout = timeout
+        self.conversation_id = None
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+
+        # Setup URLs
+        self.primary_url = 'https://pi.ai/api/chat'
+        self.fallback_url = 'https://pi.ai/api/v2/chat'
+
+        # Setup headers
+        self.headers = {
+            'Accept': 'text/event-stream',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://pi.ai',
+            'Referer': 'https://pi.ai/talk',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': LitAgent().random() if 'LitAgent' in globals() else 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
+            'X-Api-Version': '3'
+        }
+
+        # Setup cookies
+        self.cookies = {
+            '__cf_bm': uuid4().hex
+        }
+
+        # Configure session
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies = proxies
+
+        # Set cookies on the session
+        for name, value in self.cookies.items():
+            self.session.cookies.set(name, value)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
+        # Start conversation
+        self.start_conversation()
+
+    def start_conversation(self) -> str:
+        """
+        Initializes a new conversation and returns the conversation ID.
+        """
+        try:
+            response = self.session.post(
+                "https://pi.ai/api/chat/start",
+                json={},
+                timeout=self.timeout,
+                impersonate="chrome110"
+            )
+            response.raise_for_status()
+
+            data = response.json()
+            if 'conversations' in data and data['conversations'] and 'sid' in data['conversations'][0]:
+                self.conversation_id = data['conversations'][0]['sid']
+                return self.conversation_id
+            else:
+                raise IOError(f"Unexpected response structure from start API: {data}")
+
+        except CurlError as e:
+            raise IOError(f"Failed to start conversation (CurlError): {e}") from e
+        except Exception as e:
+            raise IOError(f"Failed to start conversation: {e}") from e
+
+    def download_audio_threaded(self, voice_name: str, second_sid: str, output_file: str) -> None:
+        """Downloads audio in a separate thread."""
+        params = {
+            'mode': 'eager',
+            'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
+            'messageSid': second_sid,
+        }
+
+        try:
+            audio_response = self.session.get(
+                'https://pi.ai/api/chat/voice',
+                params=params,
+                timeout=self.timeout,
+                impersonate="chrome110"
+            )
+            audio_response.raise_for_status()
+
+            with open(output_file, "wb") as file:
+                file.write(audio_response.content)
+
+        except (CurlError, Exception):
+            # Optionally log the error
+            pass
+
+    @property
+    def models(self):
+        """Return available models in OpenAI-compatible format."""
+        class _ModelList:
+            def list(inner_self):
+                return PiAI.AVAILABLE_MODELS
+        return _ModelList()
+
+
+# Example usage
+if __name__ == "__main__":
+    # Test the OpenAI-compatible interface
+    client = PiAI()
+
+    # Test streaming
+    print("Testing streaming response:")
+    response = client.chat.completions.create(
+        model="inflection_3_pi",
+        messages=[
+            {"role": "user", "content": "Hello! Say 'Hi' in one word."}
+        ],
+        stream=True
+    )
+
+    for chunk in response:
+        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end="", flush=True)
+    print()
+
+    # Test non-streaming
+    print("\nTesting non-streaming response:")
+    response = client.chat.completions.create(
+        model="inflection_3_pi",
+        messages=[
+            {"role": "user", "content": "Tell me a short joke."}
+        ],
+        stream=False
+    )
+
+    print(response.choices[0].message.content)
+    print(f"Usage: {response.usage}")