webscout-8.2.4-py3-none-any.whl → webscout-8.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout may be problematic.

Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/LLM.py DELETED
@@ -1,442 +0,0 @@
- """
- >>> from webscout.LLM import LLM, VLM
- >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
- >>> response = llm.chat([{"role": "user", "content": "What's good?"}])
- >>> print(response)
- 'Hey! I'm doing great, thanks for asking! How can I help you today? 😊'
-
- >>> # For vision tasks
- >>> vlm = VLM("cogvlm-grounding-generalist")
- >>> response = vlm.chat([{"role": "user", "content": [{"type": "image", "image_url": "path/to/image.jpg"}, {"type": "text", "text": "What's in this image?"}]}])
- """
-
- import requests
- import base64
- import json
- from typing import List, Dict, Union, Generator, Optional, Any
-
- class LLMError(Exception):
-     """Custom exception for LLM API errors 🚫
-
-     Examples:
-         >>> try:
-         ...     raise LLMError("API key not found!")
-         ... except LLMError as e:
-         ...     print(f"Error: {e}")
-         Error: API key not found!
-     """
-     pass
-
- class LLM:
-     """A class for chatting with DeepInfra's powerful language models! 🚀
-
-     This class lets you:
-     - Chat with state-of-the-art language models 💬
-     - Stream responses in real-time ⚡
-     - Control temperature and token limits 🎮
-     - Handle system messages and chat history 📝
-
-     Examples:
-         >>> from webscout.LLM import LLM
-         >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-         >>> response = llm.chat([
-         ...     {"role": "user", "content": "Write a short poem!"}
-         ... ])
-         >>> print(response)
-         'Through starlit skies and morning dew,
-         Nature's beauty, forever new.
-         In every moment, magic gleams,
-         Life's poetry flows like gentle streams.'
-     """
-
-     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-         """
-         Initialize the LLM client.
-
-         Args:
-             model: The model identifier (e.g., "meta-llama/Meta-Llama-3-70B-Instruct")
-             system_message: The system message to use for the conversation
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> print(llm.model)
-             'meta-llama/Meta-Llama-3-70B-Instruct'
-         """
-         self.model = model
-         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.conversation_history = [{"role": "system", "content": system_message}]
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://deepinfra.com',
-             'Pragma': 'no-cache',
-             'Referer': 'https://deepinfra.com/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'X-Deepinfra-Source': 'web-embed',
-             'accept': 'text/event-stream',
-             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"'
-         }
-
-     def _prepare_payload(
-         self,
-         messages: List[Dict[str, str]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-         stop: Optional[List[str]] = None,
-     ) -> Dict[str, Any]:
-         """Prepare the chat payload with all the right settings! 🎯
-
-         Args:
-             messages: Your chat messages (role & content)
-             stream: Want real-time responses? Set True! ⚡
-             temperature: Creativity level (0-1) 🎨
-             max_tokens: Max words to generate 📝
-             stop: Words to stop at (optional) 🛑
-
-         Returns:
-             Dict with all the API settings ready to go! 🚀
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> payload = llm._prepare_payload([
-             ...     {"role": "user", "content": "Hi!"}
-             ... ])
-             >>> print(payload['model'])
-             'meta-llama/Meta-Llama-3-70B-Instruct'
-         """
-         return {
-             'model': self.model,
-             'messages': messages,
-             'temperature': temperature,
-             'max_tokens': max_tokens,
-             'stop': stop or [],
-             'stream': stream
-         }
-
-     def chat(
-         self,
-         messages: List[Dict[str, str]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-         stop: Optional[List[str]] = None,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Start chatting with the AI! 💬
-
-         This method is your gateway to:
-         - Having awesome conversations 🗣️
-         - Getting creative responses 🎨
-         - Streaming real-time replies ⚡
-         - Controlling the output style 🎮
-
-         Args:
-             messages: Your chat messages (role & content)
-             stream: Want real-time responses? Set True!
-             temperature: Creativity level (0-1)
-             max_tokens: Max words to generate
-             stop: Words to stop at (optional)
-
-         Returns:
-             Either a complete response or streaming generator
-
-         Raises:
-             LLMError: If something goes wrong 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> # Regular chat
-             >>> response = llm.chat([
-             ...     {"role": "user", "content": "Tell me a joke!"}
-             ... ])
-             >>> # Streaming chat
-             >>> for chunk in llm.chat([
-             ...     {"role": "user", "content": "Tell me a story!"}
-             ... ], stream=True):
-             ...     print(chunk, end='')
-         """
-         payload = self._prepare_payload(messages, stream, temperature, max_tokens, stop)
-
-         try:
-             if stream:
-                 return self._stream_response(payload)
-             else:
-                 return self._send_request(payload)
-         except Exception as e:
-             raise LLMError(f"API request failed: {str(e)}")
-
-     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-         """Stream the chat response in real-time! ⚡
-
-         Args:
-             payload: The prepared chat payload
-
-         Yields:
-             Streaming chunks of the response
-
-         Raises:
-             LLMError: If the stream request fails 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> for chunk in llm._stream_response(llm._prepare_payload([
-             ...     {"role": "user", "content": "Tell me a story!"}
-             ... ])):
-             ...     print(chunk, end='')
-         """
-         try:
-             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
-                 response.raise_for_status()
-                 for line in response.iter_lines():
-                     if line:
-                         if line.strip() == b'data: [DONE]':
-                             break
-                         if line.startswith(b'data: '):
-                             try:
-                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
-                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
-                                     yield content
-                             except json.JSONDecodeError:
-                                 continue
-         except requests.RequestException as e:
-             raise LLMError(f"Stream request failed: {str(e)}")
-
-     def _send_request(self, payload: Dict[str, Any]) -> str:
-         """Send a non-streaming chat request.
-
-         Args:
-             payload: The prepared chat payload
-
-         Returns:
-             The complete response
-
-         Raises:
-             LLMError: If the request fails 🚫
-
-         Examples:
-             >>> llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-             >>> response = llm._send_request(llm._prepare_payload([
-             ...     {"role": "user", "content": "Tell me a joke!"}
-             ... ]))
-             >>> print(response)
-         """
-         try:
-             response = requests.post(self.api_url, json=payload, headers=self.headers)
-             response.raise_for_status()
-             result = response.json()
-             return result['choices'][0]['message']['content']
-         except requests.RequestException as e:
-             raise LLMError(f"Request failed: {str(e)}")
-         except (KeyError, IndexError) as e:
-             raise LLMError(f"Invalid response format: {str(e)}")
-         except json.JSONDecodeError as e:
-             raise LLMError(f"Invalid JSON response: {str(e)}")
-
-
- class VLM:
-     """Your gateway to vision-language AI magic! 🖼️
-
-     This class lets you:
-     - Chat about images with AI 🎨
-     - Get detailed image descriptions 📝
-     - Answer questions about images 🤔
-     - Stream responses in real-time ⚡
-
-     Examples:
-         >>> from webscout.LLM import VLM
-         >>> vlm = VLM("cogvlm-grounding-generalist")
-         >>> # Chat about an image
-         >>> response = vlm.chat([{
-         ...     "role": "user",
-         ...     "content": [
-         ...         {"type": "image", "image_url": "path/to/image.jpg"},
-         ...         {"type": "text", "text": "What's in this image?"}
-         ...     ]
-         ... }])
-         >>> print(response)
-         'I see a beautiful sunset over mountains...'
-     """
-
-     def __init__(self, model: str, system_message: str = "You are a Helpful AI."):
-         """Get ready for some vision-language magic! 🚀
-
-         Args:
-             model: Your chosen vision model
-             system_message: Set the AI's personality
-
-         Examples:
-             >>> vlm = VLM("cogvlm-grounding-generalist")
-             >>> print(vlm.model)
-             'cogvlm-grounding-generalist'
-         """
-         self.model = model
-         self.api_url = "https://api.deepinfra.com/v1/openai/chat/completions"
-         self.conversation_history = [{"role": "system", "content": system_message}]
-         self.headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://deepinfra.com',
-             'Pragma': 'no-cache',
-             'Referer': 'https://deepinfra.com/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-site',
-             'X-Deepinfra-Source': 'web-embed',
-             'accept': 'text/event-stream',
-             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"'
-         }
-
-     def chat(
-         self,
-         messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
-         stream: bool = False,
-         temperature: float = 0.7,
-         max_tokens: int = 8028,
-     ) -> Union[str, Generator[str, None, None]]:
-         """Chat about images with AI! 🖼️
-
-         This method lets you:
-         - Ask questions about images 🤔
-         - Get detailed descriptions 📝
-         - Stream responses in real-time ⚡
-         - Control response creativity 🎨
-
-         Args:
-             messages: Your chat + image data
-             stream: Want real-time responses?
-             temperature: Creativity level (0-1)
-             max_tokens: Max words to generate
-
-         Returns:
-             Either a complete response or streaming generator
-
-         Raises:
-             LLMError: If something goes wrong 🚫
-
-         Examples:
-             >>> vlm = VLM("cogvlm-grounding-generalist")
-             >>> # Regular chat with image
-             >>> response = vlm.chat([{
-             ...     "role": "user",
-             ...     "content": [
-             ...         {"type": "image", "image_url": "sunset.jpg"},
-             ...         {"type": "text", "text": "Describe this scene"}
-             ...     ]
-             ... }])
-             >>> # Streaming chat
-             >>> for chunk in vlm.chat([...], stream=True):
-             ...     print(chunk, end='')
-         """
-         payload = {
-             "model": self.model,
-             "messages": messages,
-             "stream": stream,
-             "temperature": temperature,
-             "max_tokens": max_tokens
-         }
-
-         try:
-             if stream:
-                 return self._stream_response(payload)
-             else:
-                 return self._send_request(payload)
-         except Exception as e:
-             raise LLMError(f"VLM API request failed: {str(e)}")
-
-     def _stream_response(self, payload: Dict[str, Any]) -> Generator[str, None, None]:
-         """Stream the VLM chat response."""
-         try:
-             with requests.post(self.api_url, json=payload, headers=self.headers, stream=True) as response:
-                 response.raise_for_status()
-                 for line in response.iter_lines():
-                     if line:
-                         if line.strip() == b'data: [DONE]':
-                             break
-                         if line.startswith(b'data: '):
-                             try:
-                                 chunk = json.loads(line.decode('utf-8').removeprefix('data: '))
-                                 if content := chunk.get('choices', [{}])[0].get('delta', {}).get('content'):
-                                     yield content
-                             except json.JSONDecodeError:
-                                 continue
-         except requests.RequestException as e:
-             raise LLMError(f"VLM stream request failed: {str(e)}")
-
-     def _send_request(self, payload: Dict[str, Any]) -> str:
-         """Send a non-streaming VLM chat request."""
-         try:
-             response = requests.post(self.api_url, json=payload, headers=self.headers)
-             response.raise_for_status()
-             result = response.json()
-             return result['choices'][0]['message']['content']
-         except requests.RequestException as e:
-             raise LLMError(f"VLM request failed: {str(e)}")
-         except (KeyError, IndexError) as e:
-             raise LLMError(f"Invalid VLM response format: {str(e)}")
-         except json.JSONDecodeError as e:
-             raise LLMError(f"Invalid VLM JSON response: {str(e)}")
-
-
- def encode_image_to_base64(image_path: str) -> str:
-     """Turn your image into base64 magic! 🎨
-
-     Args:
-         image_path: Where's your image at?
-
-     Returns:
-         Your image as a base64 string ✨
-
-     Raises:
-         IOError: If we can't read your image 🚫
-
-     Examples:
-         >>> from webscout.LLM import encode_image_to_base64
-         >>> image_data = encode_image_to_base64("cool_pic.jpg")
-         >>> print(len(image_data))  # Check the encoded length
-         12345
-     """
-     try:
-         with open(image_path, "rb") as image_file:
-             return base64.b64encode(image_file.read()).decode("utf-8")
-     except IOError as e:
-         raise LLMError(f"Failed to read image file: {str(e)}")
-
-
- if __name__ == "__main__":
-     # Example usage
-     try:
-         # Initialize LLM with Llama 3 model
-         llm = LLM(model="mistralai/Mistral-Small-24B-Instruct-2501")
-
-         # Example messages
-         messages = [
-             {"role": "user", "content": "Write a short poem about AI."}
-         ]
-
-         # Example 1: Non-streaming response
-         print("\nNon-streaming response:")
-         response = llm.chat(messages, stream=False)
-         print(response)
-
-         # Example 2: Streaming response
-         print("\nStreaming response:")
-         for chunk in llm.chat(messages, stream=True):
-             print(chunk, end='', flush=True)
-         print("\n")
-
-     except LLMError as e:
-         print(f"Error: {str(e)}")
-     except KeyboardInterrupt:
-         print("\nOperation cancelled by user")
webscout/Local/__init__.py DELETED
@@ -1,12 +0,0 @@
- """
- Webscout.Local - A llama-cpp-python based LLM serving tool with Ollama-compatible API
- """
- from webscout.version import __version__
-
- # Import main components for easier access
- from .llm import LLMInterface
- from .model_manager import ModelManager
- from .server import start_server
-
- # Define what's available when using `from webscout.Local import *`
- __all__ = ["LLMInterface", "ModelManager", "start_server"]
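
Note: the whole webscout.Local subpackage (LLMInterface, ModelManager, start_server) is removed along with this __init__.py. A quick runtime probe, as a sketch:

    import importlib.util

    # webscout.Local exported LLMInterface, ModelManager and start_server up to 8.2.4;
    # in 8.2.6 the subpackage is gone, so find_spec returns None.
    if importlib.util.find_spec("webscout.Local") is None:
        print("webscout.Local is not available in this install")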
webscout/Local/__main__.py DELETED
@@ -1,9 +0,0 @@
- """
- Entry point for running webscout.Local as a module.
- This allows running the CLI using 'python -m webscout.Local'.
- """
-
- from webscout.Local.cli import app
-
- if __name__ == "__main__":
-     app()
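
Note: with __main__.py deleted, `python -m webscout.Local` no longer resolves. A throwaway check, illustrative only, run against the environment you are auditing:

    import subprocess
    import sys

    # Exits non-zero ("No module named webscout.Local") on webscout 8.2.6;
    # on 8.2.4 this command started the local serving CLI instead.
    proc = subprocess.run([sys.executable, "-m", "webscout.Local", "--help"],
                          capture_output=True, text=True)
    print("available" if proc.returncode == 0 else "removed")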