webscout 2025.10.19.3-py3-none-any.whl → 2025.10.22.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

webscout/Provider/DeepAI.py CHANGED
@@ -18,7 +18,7 @@ class DeepAI(Provider):
     A provider for DeepAI's chat functionality, supporting both streaming and non-streaming responses.
     Structured similarly to other providers like DeepInfra and X0GPT.
     """
-    required_auth = False
+    required_auth = True
     AVAILABLE_MODELS = [
         "standard",
         "genius",
webscout/Provider/Flowith.py CHANGED
@@ -19,7 +19,7 @@ class Flowith(Provider):
     AVAILABLE_MODELS = [
         "gpt-5-nano", "gpt-5-mini", "glm-4.5", "gpt-oss-120b", "gpt-oss-20b", "kimi-k2",
         "gpt-4.1", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner",
-        "gemini-2.5-flash", "grok-3-mini"
+        "gemini-2.5-flash", "grok-3-mini", "claude-haiku-4.5"
     ]

     def __init__(
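
A sketch of exercising the newly listed `claude-haiku-4.5` through the native Provider interface. The `chat(prompt, stream=...)` call mirrors the `DeepSeekAssistant` usage shown at the end of this diff; the `model=` constructor keyword is assumed, not confirmed here.

from webscout.Provider.Flowith import Flowith

# Sketch only: webscout Provider subclasses expose chat(prompt, stream=...);
# model= as a constructor keyword is an assumption for Flowith.
ai = Flowith(model="claude-haiku-4.5")
for chunk in ai.chat("Say hello in one word", stream=True):
    print(chunk, end="", flush=True)
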
webscout/Provider/GMI.py CHANGED
@@ -29,7 +29,10 @@ class GMI(Provider):
         "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
         "zai-org/GLM-4.5-Air-FP8",
         "zai-org/GLM-4.5-FP8",
-        "zai-org/GLM-4.6"
+        "zai-org/GLM-4.6",
+        "openai/gpt-oss-20b",
+        "openai/gpt-oss-120b"
+
     ]

     def __init__(
webscout/Provider/OLLAMA.py CHANGED
@@ -5,9 +5,10 @@ from webscout.AIbase import Provider
 from typing import AsyncGenerator, Dict, List, Optional, Union

 try:
-    from ollama import AsyncClient, Client, ResponseError
+    from ollama import AsyncClient, Client, ResponseError  # type: ignore
 except ImportError as e:
-    pass
+    print("Please install the 'ollama' package to use the OLLAMA provider: pip install ollama")
+    raise e

 class OLLAMA(Provider):
     required_auth = True
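
The old `pass` swallowed the ImportError, so a missing `ollama` package only surfaced later as a NameError when `Client` was first referenced. The new guard fails fast with an install hint. The same pattern, isolated, as an illustrative sketch for any optional dependency:

# Fail-fast optional-dependency guard, as introduced above (illustrative).
try:
    from ollama import Client  # optional dependency
except ImportError as e:
    print("Please install the 'ollama' package to use the OLLAMA provider: pip install ollama")
    raise e
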
webscout/Provider/OPENAI/flowith.py CHANGED
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 from typing import List, Dict, Optional, Union, Generator, Any
 import time
 import json
@@ -152,7 +153,7 @@ class Flowith(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
         "gpt-5-nano", "gpt-5-mini", "glm-4.5", "gpt-oss-120b", "gpt-oss-20b", "kimi-k2",
         "gpt-4.1", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner",
-        "gemini-2.5-flash", "grok-3-mini"
+        "gemini-2.5-flash", "grok-3-mini", "claude-haiku-4.5"
     ]

     chat: Chat
@@ -171,7 +172,7 @@ if __name__ == "__main__":
     client = Flowith()
     messages = [{"role": "user", "content": "Hello, how are you?"}]
     response = client.chat.completions.create(
-        model="gpt-5-nano",
+        model="gpt-oss-120b",
         messages=messages,
         stream=True
     )
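
The `__main__` example now exercises `gpt-oss-120b`; the newly added `claude-haiku-4.5` can be driven the same way. A sketch reusing exactly the interface shown in the hunk; the chunk shape follows the OpenAI-compatible delta structure used by webscout's other OPENAI providers (see the FalconH1 code removed below).

from webscout.Provider.OPENAI.flowith import Flowith

client = Flowith()
response = client.chat.completions.create(
    model="claude-haiku-4.5",  # added in 2025.10.22.1
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    stream=True,
)
for chunk in response:
    if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
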
webscout/Provider/OPENAI/gmi.py CHANGED
@@ -261,7 +261,10 @@ class GMI(OpenAICompatibleProvider):
         "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
         "zai-org/GLM-4.5-Air-FP8",
         "zai-org/GLM-4.5-FP8",
-        "zai-org/GLM-4.6"
+        "zai-org/GLM-4.6",
+        "openai/gpt-oss-20b",
+        "openai/gpt-oss-120b"
+
     ]

     def __init__(self, browser: str = "chrome", api_key: str = None, **kwargs):
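
A sketch of calling one of the newly listed gpt-oss models through the OpenAI-compatible GMI client. The constructor keywords come from the `__init__` line in the hunk above; whether these models need a key on GMI's side is not established by this diff.

from webscout.Provider.OPENAI.gmi import GMI

client = GMI(browser="chrome", api_key=None)
response = client.chat.completions.create(
    model="openai/gpt-oss-120b",  # added in 2025.10.22.1
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(response.choices[0].message.content)
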
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "2025.10.19.3"
+__version__ = "2025.10.22.1"
 __prog__ = "webscout"
webscout/version.py.bak CHANGED
@@ -1,2 +1,2 @@
-__version__ = "2025.10.19.2"
+__version__ = "2025.10.22"
 __prog__ = "webscout"
webscout-2025.10.22.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: webscout
-Version: 2025.10.19.3
+Version: 2025.10.22.1
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
 Author-email: OEvortex <helpingai5@gmail.com>
 License: HelpingAI
webscout-2025.10.22.1.dist-info/RECORD CHANGED
@@ -14,8 +14,8 @@ webscout/prompt_manager.py,sha256=ysKFgPhkV3uqrOCilqcS9rG8xhzdU_d2wx0grC9WCCc,98
 webscout/sanitize.py,sha256=pw2Dzn-Jw9mOD4mpALYAvAf-medA-9AqdzsOmdXQbl0,46577
 webscout/update_checker.py,sha256=bz0TzRxip9DOIVMFyNz9HsGj4RKB0xZgo57AUVSJINo,3708
 webscout/utils.py,sha256=o2hU3qaVPk25sog3e4cyVZO3l8xwaZpYRziZPotEzNo,3075
-webscout/version.py,sha256=HrZ0YXrvSmy_XwCklNU3PpzJN38H-NfnCifb4qFvY1g,53
-webscout/version.py.bak,sha256=N_VqqQ31d_XNbPVnrX8RC1vAP0b8CdNu1xPPZHxN33A,53
+webscout/version.py,sha256=tpDfve30bR6-BnhG1CAgIUu7vn3hmQXpXe5Hbzdr_hg,53
+webscout/version.py.bak,sha256=nfux5jrqPs5E5KYUPEBPASSB2Z1rinLgUlMZThId2uo,51
 webscout/Extra/Act.md,sha256=_C2VW_Dc-dc7eejpGYKAOZhImHKPiQ7NSwE3bkzr6fg,18952
 webscout/Extra/__init__.py,sha256=KvJRsRBRO-fZp2jSCl6KQnPppi93hriA6O_U1O1s31c,177
 webscout/Extra/gguf.md,sha256=McXGz5sTfzOO9X4mH8yIqu5K3CgjzyXKi4_HQtezdZ4,12435
@@ -70,12 +70,12 @@ webscout/Provider/ChatSandbox.py,sha256=Hl8vOQzij7VyYVoL3DvJO6HGUs6tXZY3xrbCLKrF
 webscout/Provider/ClaudeOnline.py,sha256=3J5LEjvxzpYgIcycCq1aG_kFjks7ECkJS6l0HQ5bEyQ,12748
 webscout/Provider/Cloudflare.py,sha256=nrHCZ9SYNNRIxxzR_QRU1fy-jh31WnErxIimF0aDZms,14155
 webscout/Provider/Cohere.py,sha256=wPULeG_2JZdhN8oTBjs_QNqs6atjkYkjCa01mRmg8Fw,8082
-webscout/Provider/DeepAI.py,sha256=z7TBsidQkxfrGvlBGARcdUDclQnLBq5wC1euzHLiEI8,12661
+webscout/Provider/DeepAI.py,sha256=eKPauGUMdWG14v4593HwfLKmAFhWE90WZ6_X5LCby9Q,12660
 webscout/Provider/Deepinfra.py,sha256=Z3FNMaaVd4KiitDG8LBgGWycNuT6Y1Z06sCFURd0Ynw,15882
 webscout/Provider/ExaAI.py,sha256=HQ0BH1lInjsrpPSfIZkZf52q_gbHmnFnMJtRiZoxTXw,9548
 webscout/Provider/ExaChat.py,sha256=6ryax7zFeUrFTBa3inMrOGPxY-tfbavDQIgOTZr0-cY,11700
-webscout/Provider/Flowith.py,sha256=5Foxpx1Jj4HhMkWrPNo8hZDMhtC-YfCCKgTCXsPhUoQ,8497
-webscout/Provider/GMI.py,sha256=NkFVwrGFPu_dqy4YWPAjgPHmNVX-jltYuBH623ooZvs,11366
+webscout/Provider/Flowith.py,sha256=GUYdf_AFR2qWx_OHtSPzAK5vTcaBfgv0GeWVMoAQnC8,8517
+webscout/Provider/GMI.py,sha256=fB2yIDel5MRQd_mjhh8mrbwqXKIkoLZDrIkRbxgNymc,11428
 webscout/Provider/Gemini.py,sha256=Idpl9B_2yF2hK8agb6B4Qnvg6jmaQT008aOx8M2w2O4,6288
 webscout/Provider/GeminiProxy.py,sha256=JzOnUMNEcriTXbVZvp9SauYWx4ekgCj2DyRyD-jUj9M,6515
 webscout/Provider/GithubChat.py,sha256=FeRQfy1C9gxPlDmfH0VfBgd6CSCmN1XI6YES1Mp9mQM,14374
@@ -88,7 +88,7 @@ webscout/Provider/Koboldai.py,sha256=jv0zVxMp_Y56qZGZY8K_2DY9ysB0GzneEujTNd8W-Hw
 webscout/Provider/LambdaChat.py,sha256=SrvKTlEiqTX-e6ixCQ68e7DJVyDmd9MBnWMlnfcvQOk,18983
 webscout/Provider/Nemotron.py,sha256=Sj2D3Vng6icocejV45wWKvXYh8NG_pYMkfH-F1UL4CA,8838
 webscout/Provider/Netwrck.py,sha256=Wni4zV1J2MLt_G-sKwEdgsSwQTlGCZ1nKrD8akdG9LY,10295
-webscout/Provider/OLLAMA.py,sha256=1FEXxOwPX67Hl022aHyQipk_MLptCiNkyZSSwxOqIOA,14692
+webscout/Provider/OLLAMA.py,sha256=PSnRnxiW18L3Gy827D2JPsOBNTJDfOTG-8whR-jHj9E,14806
 webscout/Provider/OpenGPT.py,sha256=R2H0iewJmaaW-KeHVOCPaL1lMyagy1KvrTALxhOBgQU,9389
 webscout/Provider/Openai.py,sha256=yxPXvACdA7cOyBEUN_fCbDujCzhpzXHVXlhteeg6JRo,9381
 webscout/Provider/PI.py,sha256=CFD_z6UFm0FKMvALSSefCdQ_fM-fRqpLRuXVmMJ2s3w,16230
@@ -111,7 +111,6 @@ webscout/Provider/akashgpt.py,sha256=PjRgZL0hxfhZPydn4a6tOVCa18SCseV6QJjXu7LZauY
 webscout/Provider/cerebras.py,sha256=C0rbHL65sVFUHe7zx0UbIlWhA06qUKvip5txgRsp_bU,17030
 webscout/Provider/chatglm.py,sha256=hAWtwlAUefQLc9zh3ji3-IJwH7z2fV-4tLN5_Wi0VAM,15887
 webscout/Provider/cleeai.py,sha256=WpSOoJZ69ttEosbJNH3J4UAkoOTOCy1hXyTjZsAzMTw,7782
-webscout/Provider/deepseek_assistant.py,sha256=7jxTWEUwvGwvj8NsSjk8PSvNKUgxQXPp8GwD7JcufC0,14582
 webscout/Provider/elmo.py,sha256=tjqB8zxmpKb_Ps0zJ_nd63KQ8FbwzUEEKWR0_Mhc20Y,12618
 webscout/Provider/geminiapi.py,sha256=xvxQzTX36MTb2ukiKjhfzomGR3OXOmtg40eMrYLB5rA,8321
 webscout/Provider/granite.py,sha256=u5-kyemo3lmPMc_R-OWCfusZMy-olmKo1hhzJ9ZYWLQ,11015
@@ -145,7 +144,6 @@ webscout/Provider/AISEARCH/stellar_search.py,sha256=BFEGmcOHZUtFx-Z4tqUIrgZ-qgdz
 webscout/Provider/AISEARCH/webpilotai_search.py,sha256=C7j-xe2If6FwS-YyXkn8U5-Uw09eG7ZrESiCFJo9eYo,11256
 webscout/Provider/OPENAI/Cloudflare.py,sha256=RGf1aH08UzkxRq9hF3nmKbkOrDzGXU_KFkdtsE8SVpY,14454
 webscout/Provider/OPENAI/DeepAI.py,sha256=IeGpsbsW8URM3Lulfp3VGetZOqVEq9wK-AjfZA7d9Lw,13743
-webscout/Provider/OPENAI/FalconH1.py,sha256=SlMZF-2TzquEsKFTuPGR039OnJ3Z4ro49nuLyNFT0Sk,21880
 webscout/Provider/OPENAI/FreeGemini.py,sha256=C8ZdV0FxzP4D2g5scW1Fp7zG4BmV-Cjztdp0KeuQqIw,10919
 webscout/Provider/OPENAI/GeminiProxy.py,sha256=9_6VHFylM3-ct0m5XDvxfZ1tmd70RnyZl5HT-qv1g4E,11266
 webscout/Provider/OPENAI/K2Think.py,sha256=bNdq-oy2ie8PH7r6RDX7ZosYKFGjqzLSBvC2d_HAWAg,14822
@@ -165,10 +163,10 @@ webscout/Provider/OPENAI/deepinfra.py,sha256=RzlBVBTsrLeRpTV8PrZxlqEN0XTRgnL2Jtm
 webscout/Provider/OPENAI/e2b.py,sha256=1Eg70mzeh31kyCfctvVLQVODLBz3LPPtUPcQBbksYZ4,72311
 webscout/Provider/OPENAI/exaai.py,sha256=NKsmz8mka3jncDe7S-jeJpRbw26ds2fqAvChd9ltNpM,14646
 webscout/Provider/OPENAI/exachat.py,sha256=xxT-COXVbCgjUYyi4Zu469eUSSwABYYLdQ7HljLm6a8,15409
-webscout/Provider/OPENAI/flowith.py,sha256=ZNgVFeEBf0Pj4Ey11-RrEP8qlLKNzJul6FA-Tg-Y8hA,6638
+webscout/Provider/OPENAI/flowith.py,sha256=LMof65tofDbYfg4oqzvjolC1DEpSj_4BOUEAhzsHWnY,6683
 webscout/Provider/OPENAI/friendli.py,sha256=NlTNz-3nBFPKA1xXwZx8aJPsuQh-_QB3AzM14x5Z3Qw,10214
 webscout/Provider/OPENAI/generate_api_key.py,sha256=yh8rUBbNLdbe-uetelw2sVfPaNNx7CYIHoDfcyEjRy4,1490
-webscout/Provider/OPENAI/gmi.py,sha256=cUuSsi0kAvTxp1RbhwfKIX2UAIqyv2uKssRs57cB4Bo,13009
+webscout/Provider/OPENAI/gmi.py,sha256=oNAiEiJkxBAR7Vmw0kqmGlasV17f9hRkNqY6f6EV3VE,13071
 webscout/Provider/OPENAI/groq.py,sha256=Kw5mm___iKDte1XXumEd0aCWQSDr9WioX_lpL07KGx4,14200
 webscout/Provider/OPENAI/heckai.py,sha256=XCh_D8KccmLtDATcp9WJ0RuE0tXhklq9dBrmVctcVto,11457
 webscout/Provider/OPENAI/llmchatco.py,sha256=izvK7XENNZCm6QugZ4f6DfALuMCjO4tLlg2izpyO3fM,15034
@@ -332,9 +330,9 @@ webscout/zeroart/__init__.py,sha256=Cy9AUtXnOaFBQjNvCpN19IXJo7Lg15VTaNcTBxOTFek,
 webscout/zeroart/base.py,sha256=I-xhDEfArBb6q7hiF5oPoyXeu2hzL6orp7uWgS_YtG8,2299
 webscout/zeroart/effects.py,sha256=XUNZY1-wMPd6GNL3glFXtWaF9wDis_z55qTyCdnzHDo,5063
 webscout/zeroart/fonts.py,sha256=S7qDhUmDXl1makMreZl_eVW_7-sqVQiGn-kQKl0Hg_A,51006
-webscout-2025.10.19.3.dist-info/licenses/LICENSE.md,sha256=hyfFlVn7pWcrvuvs-piB8k4J8DlXdOsYje9RyPxc6Ik,7543
-webscout-2025.10.19.3.dist-info/METADATA,sha256=c2w2uSFFgwSBIFHKH1w3qRtTWAaAYtHICbOvJhm1Lp0,21640
-webscout-2025.10.19.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-webscout-2025.10.19.3.dist-info/entry_points.txt,sha256=4xAgKHWwNhAvJyShLCFs_IU8Reb8zR3wqf8egrsDr8g,118
-webscout-2025.10.19.3.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
-webscout-2025.10.19.3.dist-info/RECORD,,
+webscout-2025.10.22.1.dist-info/licenses/LICENSE.md,sha256=hyfFlVn7pWcrvuvs-piB8k4J8DlXdOsYje9RyPxc6Ik,7543
+webscout-2025.10.22.1.dist-info/METADATA,sha256=hOXQdp-SN7a864HFBckSK_EUBn-ZfauOnxTnK0AP4Gk,21640
+webscout-2025.10.22.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+webscout-2025.10.22.1.dist-info/entry_points.txt,sha256=4xAgKHWwNhAvJyShLCFs_IU8Reb8zR3wqf8egrsDr8g,118
+webscout-2025.10.22.1.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+webscout-2025.10.22.1.dist-info/RECORD,,
webscout/Provider/OPENAI/FalconH1.py REMOVED
@@ -1,452 +0,0 @@
-import requests
-import json
-import time
-import uuid
-from typing import List, Dict, Optional, Union, Generator, Any
-
-from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from webscout.Provider.OPENAI.utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage,
-    get_system_prompt,
-    count_tokens,
-    format_prompt
-)
-from webscout.litagent import LitAgent
-
-def convert_openai_to_falcon_history(messages: List[Dict[str, str]]) -> list:
-    """
-    Converts a list of OpenAI-style chat messages to Falcon/Gradio chat history format.
-
-    Args:
-        messages (List[Dict[str, str]]):
-            A list of message dictionaries, each with 'role' and 'content' keys, following the OpenAI API format.
-
-    Returns:
-        list: A single-turn Falcon/Gradio chat history in the format [[prompt, None]].
-    """
-    prompt = format_prompt(messages, add_special_tokens=False, do_continue=True, include_system=True)
-    return [[prompt, None]]
-
-class Completions(BaseCompletions):
-    """
-    Handles text completion requests for the FalconH1 provider, supporting both streaming and non-streaming modes.
-
-    Attributes:
-        _client (Any): Reference to the FalconH1 client instance.
-        _last_yielded_content_stream (str): Tracks the last yielded content in streaming mode.
-    """
-    def __init__(self, client):
-        """
-        Initializes the Completions handler.
-
-        Args:
-            client: The FalconH1 client instance.
-        """
-        self._client = client
-        self._last_yielded_content_stream = ""
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 1024,
-        stream: bool = False,
-        temperature: Optional[float] = 0.1,
-        top_p: Optional[float] = 1.0,
-        timeout: Optional[int] = None,
-        proxies: Optional[dict] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a chat completion using the FalconH1 API, supporting both streaming and non-streaming responses.
-
-        Args:
-            model (str): The model identifier to use for completion.
-            messages (List[Dict[str, str]]): List of chat messages in OpenAI format.
-            max_tokens (Optional[int]): Maximum number of tokens to generate in the completion.
-            stream (bool): Whether to stream the response as chunks.
-            temperature (Optional[float]): Sampling temperature.
-            top_p (Optional[float]): Nucleus sampling probability.
-            timeout (Optional[int]): Request timeout in seconds.
-            proxies (Optional[dict]): Optional proxy settings for the request.
-            **kwargs: Additional keyword arguments for advanced options (e.g., top_k, repetition_penalty).
-
-        Returns:
-            Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]: The chat completion result or a generator yielding streamed chunks.
-        """
-        session_hash = str(uuid.uuid4()).replace('-', '')
-        chat_history = convert_openai_to_falcon_history(messages)
-        if not chat_history or chat_history[-1][0] is None:
-            raise ValueError("Messages must contain at least one user message for Falcon API.")
-        resolved_model_name = self._client.get_model(model)
-        payload_data = [
-            chat_history,
-            resolved_model_name,
-            temperature,
-            max_tokens,
-            top_p,
-            kwargs.get("top_k", 20),
-            kwargs.get("repetition_penalty", 1.2)
-        ]
-        payload = {
-            "data": payload_data,
-            "event_data": None,
-            "fn_index": 5,
-            "trigger_id": 12,
-            "session_hash": session_hash
-        }
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-        if stream:
-            self._last_yielded_content_stream = ""
-            return self._create_stream(request_id, created_time, resolved_model_name, payload, session_hash, timeout=timeout, proxies=proxies)
-        else:
-            return self._create_non_stream(request_id, created_time, resolved_model_name, payload, session_hash, timeout=timeout, proxies=proxies)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], session_hash: str,
-        timeout: Optional[int] = None, proxies: Optional[dict] = None
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        """
-        Internal method to handle streaming chat completions from the FalconH1 API.
-
-        Args:
-            request_id (str): Unique request identifier.
-            created_time (int): Timestamp of request creation.
-            model (str): Model identifier.
-            payload (Dict[str, Any]): Request payload for the API.
-            session_hash (str): Unique session hash for the request.
-            timeout (Optional[int]): Request timeout in seconds.
-            proxies (Optional[dict]): Optional proxy settings.
-
-        Yields:
-            ChatCompletionChunk: Chunks of the chat completion as they are received from the API.
-        """
-        original_proxies = self._client.session.proxies.copy()
-        if proxies is not None:
-            self._client.session.proxies = proxies
-        else:
-            self._client.session.proxies = {}
-        try:
-            session = self._client.session
-            join_resp = session.post(
-                self._client.api_join_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                timeout=timeout if timeout is not None else self._client.timeout
-            )
-            join_resp.raise_for_status()
-            data_url = f"{self._client.api_data_endpoint}?session_hash={session_hash}"
-            stream_resp = session.get(
-                data_url,
-                headers=self._client.stream_headers,
-                stream=True,
-                timeout=timeout if timeout is not None else self._client.timeout
-            )
-            stream_resp.raise_for_status()
-            for line in stream_resp.iter_lines():
-                if line:
-                    decoded_line = line.decode('utf-8')
-                    if decoded_line.startswith('data: '):
-                        try:
-                            json_data = json.loads(decoded_line[6:])
-                            msg_type = json_data.get('msg')
-                            if msg_type == 'process_generating':
-                                output_field = json_data.get('output', {})
-                                data_field = output_field.get('data')
-                                if data_field and isinstance(data_field, list) and len(data_field) > 0:
-                                    inner_data = data_field[0]
-                                    content_to_yield = None
-                                    if isinstance(inner_data, list) and len(inner_data) > 0:
-                                        if isinstance(inner_data[0], list) and len(inner_data[0]) == 3 and inner_data[0][0] == "append":
-                                            content_to_yield = inner_data[0][2]
-                                        elif isinstance(inner_data[0], list) and len(inner_data[0]) == 2 and \
-                                             isinstance(inner_data[0][1], str):
-                                            current_full_response = inner_data[0][1]
-                                            if current_full_response.startswith(self._last_yielded_content_stream):
-                                                content_to_yield = current_full_response[len(self._last_yielded_content_stream):]
-                                            else:
-                                                content_to_yield = current_full_response
-                                            self._last_yielded_content_stream = current_full_response
-                                    if content_to_yield:
-                                        delta = ChoiceDelta(content=content_to_yield, role="assistant")
-                                        yield ChatCompletionChunk(id=request_id, choices=[Choice(index=0, delta=delta)], created=created_time, model=model)
-                            elif msg_type == 'process_completed' or msg_type == 'close_stream':
-                                break
-                        except json.JSONDecodeError:
-                            continue
-                        except Exception as e:
-                            continue
-        finally:
-            self._client.session.proxies = original_proxies
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], session_hash: str,
-        timeout: Optional[int] = None, proxies: Optional[dict] = None
-    ) -> ChatCompletion:
-        """
-        Internal method to handle non-streaming chat completions from the FalconH1 API.
-
-        Args:
-            request_id (str): Unique request identifier.
-            created_time (int): Timestamp of request creation.
-            model (str): Model identifier.
-            payload (Dict[str, Any]): Request payload for the API.
-            session_hash (str): Unique session hash for the request.
-            timeout (Optional[int]): Request timeout in seconds.
-            proxies (Optional[dict]): Optional proxy settings.
-
-        Returns:
-            ChatCompletion: The full chat completion result.
-        """
-        original_proxies = self._client.session.proxies.copy()
-        if proxies is not None:
-            self._client.session.proxies = proxies
-        else:
-            self._client.session.proxies = {}
-        full_response_content = ""
-        last_full_response_chunk_ns = ""
-        response_parts = []
-        try:
-            session = self._client.session
-            join_resp = session.post(
-                self._client.api_join_endpoint, headers=self._client.headers, json=payload,
-                timeout=timeout if timeout is not None else self._client.timeout
-            )
-            join_resp.raise_for_status()
-            data_url = f"{self._client.api_data_endpoint}?session_hash={session_hash}"
-            overall_start_time = time.time()
-            effective_timeout = timeout if timeout is not None else self._client.timeout
-            while True:
-                if time.time() - overall_start_time > effective_timeout:
-                    raise TimeoutError("Timeout waiting for non-stream response completion.")
-                stream_resp = session.get(
-                    data_url, headers=self._client.stream_headers, stream=True,
-                    timeout=effective_timeout
-                )
-                stream_resp.raise_for_status()
-                found_completion_message = False
-                for line in stream_resp.iter_lines():
-                    if time.time() - overall_start_time > effective_timeout:
-                        raise TimeoutError("Timeout during non-stream response processing.")
-                    if line:
-                        decoded_line = line.decode('utf-8')
-                        if decoded_line.startswith('data: '):
-                            try:
-                                json_data = json.loads(decoded_line[6:])
-                                msg_type = json_data.get('msg')
-                                if msg_type == 'process_generating':
-                                    output_field = json_data.get('output', {})
-                                    data_field = output_field.get('data')
-                                    if data_field and isinstance(data_field, list) and len(data_field) > 0:
-                                        inner_data = data_field[0]
-                                        current_chunk_text = None
-                                        if isinstance(inner_data, list) and len(inner_data) > 0:
-                                            if isinstance(inner_data[0], list) and len(inner_data[0]) == 3 and inner_data[0][0] == "append":
-                                                current_chunk_text = inner_data[0][2]
-                                            elif isinstance(inner_data[0], list) and len(inner_data[0]) == 2 and isinstance(inner_data[0][1], str):
-                                                current_full_response = inner_data[0][1]
-                                                if current_full_response.startswith(last_full_response_chunk_ns):
-                                                    current_chunk_text = current_full_response[len(last_full_response_chunk_ns):]
-                                                else:
-                                                    current_chunk_text = current_full_response
-                                                last_full_response_chunk_ns = current_full_response
-                                        if current_chunk_text:
-                                            response_parts.append(current_chunk_text)
-                                elif msg_type == 'process_completed' or msg_type == 'close_stream':
-                                    if msg_type == 'process_completed':
-                                        output_field = json_data.get('output', {})
-                                        data_field = output_field.get('data')
-                                        if data_field and isinstance(data_field, list) and len(data_field) > 0:
-                                            inner_data = data_field[0]
-                                            if isinstance(inner_data, list) and len(inner_data) > 0 and \
-                                               isinstance(inner_data[0], list) and len(inner_data[0]) == 2 and \
-                                               isinstance(inner_data[0][1], str):
-                                                final_full_response = inner_data[0][1]
-                                                if final_full_response != last_full_response_chunk_ns:
-                                                    if final_full_response.startswith(last_full_response_chunk_ns):
-                                                        response_parts.append(final_full_response[len(last_full_response_chunk_ns):])
-                                                    else:
-                                                        response_parts = [final_full_response]
-                                                    last_full_response_chunk_ns = final_full_response
-                                    found_completion_message = True
-                                    break
-                            except json.JSONDecodeError:
-                                continue
-                            except Exception as e:
-                                raise e
-                if found_completion_message:
-                    break
-            full_response_content = "".join(response_parts)
-            message = ChatCompletionMessage(role="assistant", content=full_response_content)
-            choice = Choice(index=0, message=message, finish_reason="stop")
-
-            # Simplified token counting without history iteration
-            chat_history = payload['data'][0]
-            prompt = chat_history[0][0] if chat_history and chat_history[0] and chat_history[0][0] else ""
-            prompt_tokens = count_tokens(prompt)
-            completion_tokens = count_tokens(full_response_content)
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=prompt_tokens + completion_tokens
-            )
-            return ChatCompletion(
-                id=request_id, choices=[choice], created=created_time,
-                model=model, usage=usage
-            )
-        finally:
-            self._client.session.proxies = original_proxies
-
-class Chat(BaseChat):
-    """
-    Provides a chat interface for the FalconH1 provider, exposing the completions API.
-
-    Attributes:
-        completions (Completions): The completions handler for chat requests.
-    """
-    def __init__(self, client):
-        """
-        Initializes the Chat interface for FalconH1.
-
-        Args:
-            client: The FalconH1 client instance.
-        """
-        self.completions = Completions(client)
-
-class FalconH1(OpenAICompatibleProvider):
-    """
-    FalconH1 provider implementation compatible with the OpenAI API interface.
-    Handles chat completions using FalconH1 models via the Hugging Face Spaces API.
-
-    Attributes:
-        base_url (str): Base URL for the FalconH1 API.
-        api_join_endpoint (str): Endpoint for joining the chat queue.
-        api_data_endpoint (str): Endpoint for retrieving chat data.
-        AVAILABLE_MODELS (List[str]): List of supported FalconH1 model identifiers.
-        timeout (int): Default request timeout in seconds.
-        session (requests.Session): HTTP session for API requests.
-        headers (dict): Default HTTP headers for requests.
-        stream_headers (dict): HTTP headers for streaming requests.
-        chat (Chat): Chat interface for completions.
-    """
-    base_url = "https://tiiuae-falcon-h1-playground.hf.space"
-    api_join_endpoint = f"{base_url}/gradio_api/queue/join?__theme=dark"
-    api_data_endpoint = f"{base_url}/gradio_api/queue/data"
-    AVAILABLE_MODELS = [
-        "Falcon-H1-34B-Instruct",
-        "Falcon-H1-7B-Instruct",
-        "Falcon-H1-3B-Instruct",
-        "Falcon-H1-1.5B-Deep-Instruct",
-        "Falcon-H1-1.5B-Instruct",
-        "Falcon-H1-0.5B-Instruct",
-    ]
-    def __init__(self, timeout: int = 120, proxies: Optional[dict] = None):
-        """
-        Initializes the FalconH1 provider with optional timeout and proxy settings.
-
-        Args:
-            timeout (int): Default request timeout in seconds (default: 120).
-            proxies (Optional[dict]): Optional proxy settings for HTTP requests.
-        """
-        super().__init__(proxies=proxies)
-        self.timeout = timeout
-        self.headers = {
-            'User-Agent': LitAgent().random(),
-            'Accept': '*/*',
-            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
-            'Accept-Encoding': 'gzip, deflate, br, zstd',
-            'Referer': f'{self.base_url}/?__theme=dark',
-            'Content-Type': 'application/json',
-            'Origin': self.base_url,
-            'Connection': 'keep-alive',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'DNT': '1',
-            'Sec-GPC': '1',
-        }
-        self.stream_headers = {
-            'Accept': 'text/event-stream',
-            'Accept-Language': self.headers['Accept-Language'],
-            'Referer': self.headers['Referer'],
-            'User-Agent': self.headers['User-Agent'],
-            'Connection': 'keep-alive',
-            'Cache-Control': 'no-cache',
-        }
-        self.session.headers.update(self.headers)
-        self.chat = Chat(self)
-    def get_model(self, model_identifier: str) -> str:
-        """
-        Returns the resolved model name for the given identifier.
-
-        Args:
-            model_identifier (str): The model identifier string.
-
-        Returns:
-            str: The resolved model name (currently returns the identifier as-is).
-        """
-        return model_identifier
-    @property
-    def models(self):
-        """
-        Returns a list-like object containing available FalconH1 models.
-
-        Returns:
-            ModelList: An object with a .list() method returning model data objects.
-        """
-        class ModelData:
-            def __init__(self, id_str):
-                self.id = id_str
-        class ModelList:
-            def __init__(self, models_available):
-                self.data = [ModelData(m) for m in models_available]
-            def list(self):
-                return self.data
-        return ModelList(self.AVAILABLE_MODELS)
-
-if __name__ == "__main__":
-    """
-    Example usage of the FalconH1 provider for both non-streaming and streaming chat completions.
-    """
-    print("FalconH1 Provider Example")
-    client = FalconH1()
-    print("\n--- Non-Streaming Example ---")
-    try:
-        response = client.chat.completions.create(
-            model="Falcon-H1-34B-Instruct",
-            messages=[
-                {"role": "system", "content": "You are a helpful AI assistant named Falcon."},
-                {"role": "user", "content": "Hello, what is your name and what can you do?"}
-            ]
-        )
-        print(f"ID: {response.id}")
-        print(f"Model: {response.model}")
-        if response.choices:
-            print(f"Response: {response.choices[0].message.content}")
-        if response.usage:
-            print(f"Usage: {response.usage}")
-    except Exception as e:
-        print(f"Error in non-streaming example: {e}")
-    print("\n--- Streaming Example ---")
-    try:
-        stream_response = client.chat.completions.create(
-            model="Falcon-H1-34B-Instruct",
-            messages=[
-                {"role": "user", "content": "Tell me a short story about a brave falcon."}
-            ],
-            stream=True,
-            max_tokens=150
-        )
-        print("Streaming response:")
-        full_streamed_content = ""
-        for chunk in stream_response:
-            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-                content_piece = chunk.choices[0].delta.content
-                print(content_piece, end="", flush=True)
-                full_streamed_content += content_piece
-        print("\n--- End of Stream ---")
-    except Exception as e:
-        print(f"Error in streaming example: {e}")
webscout/Provider/deepseek_assistant.py REMOVED
@@ -1,378 +0,0 @@
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-import json
-import re
-from typing import Any, Dict, Optional, Generator, Union, List
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-
-class DeepSeekAssistant(Provider):
-    """
-    A class to interact with the DeepSeek Assistant API.
-
-    This provider interfaces with the deepseek-assistant.com API to provide
-    AI chat completions using the V3 model.
-
-    Attributes:
-        AVAILABLE_MODELS (list): List of available models for the provider.
-
-    Examples:
-        >>> from webscout.Provider.deepseek_assistant import DeepSeekAssistant
-        >>> ai = DeepSeekAssistant()
-        >>> response = ai.chat("What's the weather today?")
-        >>> print(response)
-        'I can help you with weather information...'
-    """
-
-    AVAILABLE_MODELS = ["V3 model", "R1 model"]
-    required_auth = False
-    @staticmethod
-    def _deepseek_assistant_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from DeepSeek Assistant stream JSON objects."""
-        if isinstance(chunk, dict):
-            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
-        return None
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "V3 model",
-        system_prompt: str = "You are a helpful assistant.",
-        browser: str = "chrome"
-    ):
-        """
-        Initializes the DeepSeek Assistant API client.
-
-        Args:
-            is_conversation (bool): Whether the provider is in conversation mode.
-            max_tokens (int): Maximum number of tokens to sample.
-            timeout (int): Timeout for API requests.
-            intro (str): Introduction message for the conversation.
-            filepath (str): Filepath for storing conversation history.
-            update_file (bool): Whether to update the conversation history file.
-            proxies (dict): Proxies for the API requests.
-            history_offset (int): Offset for conversation history.
-            act (str): Act for the conversation.
-            model (str): The model to use for completions.
-            system_prompt (str): The system prompt to define the assistant's role.
-            browser (str): Browser type for fingerprinting.
-
-        Examples:
-            >>> ai = DeepSeekAssistant(model="V3 model")
-            >>> print(ai.model)
-            'V3 model'
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.url = "https://deepseek-assistant.com/api/search-stream-deep-chat-testing.php"
-
-        # Initialize LitAgent for user agent generation
-        self.agent = LitAgent()
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Headers based on the JavaScript code
-        self.headers = {
-            "accept": "*/*",
-            "accept-language": "id-ID,id;q=0.9",
-            "cache-control": "no-cache",
-            "content-type": "application/json",
-            "cookie": "click_id=OS3Hz0E1yKfu4YnZNwedESMEdKEgMTzL; organic_user_deepseek_assistant_ch=%7B%22pixel%22%3A%22OS3Hz0E1yKfu4YnZNwedESMEdKEgMTzL%22%2C%22cc%22%3A%22ID%22%2C%22channel%22%3A%22organic_flag%22%7D",
-            "origin": "https://deepseek-assistant.com",
-            **self.fingerprint
-
-        }
-
-        # Initialize curl_cffi Session
-        self.session = Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-
-        self.system_prompt = system_prompt
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def refresh_identity(self, browser: str = None):
-        """
-        Refreshes the browser identity fingerprint.
-
-        Args:
-            browser: Specific browser to use for the new fingerprint
-        """
-        browser = browser or self.fingerprint.get("browser_type", "chrome")
-        self.fingerprint = self.agent.generate_fingerprint(browser)
-
-        # Update user-agent header with new fingerprint
-        self.headers.update({
-            "user-agent": self.fingerprint.get("user_agent", self.headers["user-agent"])
-        })
-
-        # Update session headers
-        self.session.headers.update(self.headers)
-
-        return self.fingerprint
-
-    def _parse_chat_response(self, input_text: str) -> str:
-        """
-        Parses the chat response from the API, similar to the JavaScript parseChatResponse method.
-
-        Args:
-            input_text (str): The raw response text from the API
-
-        Returns:
-            str: The parsed content from the response
-        """
-        lines = input_text.strip().split("\n")
-        result = ""
-
-        for line in lines:
-            trimmed_line = line.strip()
-            if trimmed_line.startswith("data: {") and trimmed_line.endswith("}"):
-                try:
-                    # Extract JSON from the line
-                    json_start = trimmed_line.find("{")
-                    if json_start != -1:
-                        json_str = trimmed_line[json_start:]
-                        parsed_data = json.loads(json_str)
-
-                        # Extract content from the parsed data
-                        content = parsed_data.get("choices", [{}])[0].get("delta", {}).get("content")
-                        if content is not None:
-                            result += content
-                except (json.JSONDecodeError, KeyError, IndexError):
-                    # Skip malformed JSON or missing keys
-                    continue
-
-        return result.strip()
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        """
-        Sends a prompt to the DeepSeek Assistant API and returns the response.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            raw (bool): Whether to return the raw response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Union[Dict[str, Any], Generator]: The API response.
-
-        Examples:
-            >>> ai = DeepSeekAssistant()
-            >>> response = ai.ask("Tell me a joke!")
-            >>> print(response)
-            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        payload = {
-            "model": self.model,
-            "messages": [
-                {"role": "system", "content": self.system_prompt},  # Add system role
-                {"role": "user", "content": conversation_prompt}
-            ]
-        }
-
-        def for_stream():
-            streaming_text = ""
-            try:
-                response = self.session.post(
-                    self.url,
-                    data=json.dumps(payload),
-                    stream=True,
-                    timeout=self.timeout,
-                    impersonate="chrome110"
-                )
-                response.raise_for_status()
-
-                # Use sanitize_stream to process the response
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None),
-                    intro_value="data:",
-                    to_json=True,
-                    skip_markers=["[DONE]"],
-                    content_extractor=self._deepseek_assistant_extractor,
-                    yield_raw_on_error=False
-                )
-
-                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        resp = dict(text=content_chunk)
-                        yield resp if not raw else content_chunk
-
-            except CurlError as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
-            finally:
-                # Update history after stream finishes or fails
-                if streaming_text:
-                    self.last_response = {"text": streaming_text}
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-        def for_non_stream():
-            try:
-                response = self.session.post(
-                    self.url,
-                    data=json.dumps(payload),
-                    timeout=self.timeout,
-                    impersonate="chrome110"
-                )
-                response.raise_for_status()
-
-                # Parse the response using the custom parser
-                content = self._parse_chat_response(response.text)
-
-                self.last_response = {"text": content}
-                self.conversation.update_chat_history(prompt, content)
-                return self.last_response if not raw else content
-
-            except CurlError as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-            except Exception as e:
-                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        """
-        Initiates a chat with the DeepSeek Assistant API using the provided prompt.
-
-        Args:
-            prompt (str): The prompt to send to the API.
-            stream (bool): Whether to stream the response.
-            optimizer (str): Optimizer to use for the prompt.
-            conversationally (bool): Whether to generate the prompt conversationally.
-
-        Returns:
-            Union[str, Generator[str, None, None]]: The chat response.
-
-        Examples:
-            >>> ai = DeepSeekAssistant()
-            >>> response = ai.chat("Tell me a joke")
-            >>> print(response)
-            'Why did the scarecrow win an award? Because he was outstanding in his field!'
-        """
-        def for_stream_chat():
-            gen = self.ask(
-                prompt, stream=True, raw=False,
-                optimizer=optimizer, conversationally=conversationally
-            )
-            for response_dict in gen:
-                yield self.get_message(response_dict)
-
-        def for_non_stream_chat():
-            response_data = self.ask(
-                prompt, stream=False, raw=False,
-                optimizer=optimizer, conversationally=conversationally
-            )
-            return self.get_message(response_data)
-
-        return for_stream_chat() if stream else for_non_stream_chat()
-
-    def get_message(self, response: dict) -> str:
-        """
-        Extracts the message content from the API response.
-
-        Args:
-            response (dict): The API response.
-
-        Returns:
-            str: The message content.
-
-        Examples:
-            >>> ai = DeepSeekAssistant()
-            >>> response = ai.ask("Tell me a joke!")
-            >>> message = ai.get_message(response)
-            >>> print(message)
-            'Why did the scarecrow win an award? Because he was outstanding in his field!'
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-
-if __name__ == "__main__":
-    # Test the provider
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in DeepSeekAssistant.AVAILABLE_MODELS:
-        try:
-            test_ai = DeepSeekAssistant(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
-            response_text = ""
-            for chunk in response:
-                response_text += chunk
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")