camel-ai 0.2.41__py3-none-any.whl → 0.2.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of camel-ai might be problematic.
Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +24 -4
  3. camel/configs/__init__.py +3 -0
  4. camel/configs/anthropic_config.py +2 -24
  5. camel/configs/ppio_config.py +102 -0
  6. camel/configs/reka_config.py +1 -7
  7. camel/configs/samba_config.py +1 -7
  8. camel/configs/togetherai_config.py +1 -7
  9. camel/embeddings/__init__.py +4 -0
  10. camel/embeddings/azure_embedding.py +119 -0
  11. camel/embeddings/together_embedding.py +136 -0
  12. camel/environments/__init__.py +3 -0
  13. camel/environments/multi_step.py +12 -10
  14. camel/environments/single_step.py +28 -11
  15. camel/environments/tic_tac_toe.py +518 -0
  16. camel/loaders/__init__.py +2 -0
  17. camel/loaders/crawl4ai_reader.py +230 -0
  18. camel/models/__init__.py +2 -0
  19. camel/models/azure_openai_model.py +10 -2
  20. camel/models/base_model.py +111 -28
  21. camel/models/cohere_model.py +5 -1
  22. camel/models/deepseek_model.py +4 -0
  23. camel/models/gemini_model.py +8 -2
  24. camel/models/model_factory.py +3 -0
  25. camel/models/ollama_model.py +8 -2
  26. camel/models/openai_compatible_model.py +8 -2
  27. camel/models/openai_model.py +16 -4
  28. camel/models/ppio_model.py +184 -0
  29. camel/models/vllm_model.py +147 -48
  30. camel/societies/workforce/workforce.py +26 -3
  31. camel/toolkits/__init__.py +2 -0
  32. camel/toolkits/browser_toolkit.py +7 -3
  33. camel/toolkits/google_calendar_toolkit.py +432 -0
  34. camel/toolkits/search_toolkit.py +119 -1
  35. camel/toolkits/terminal_toolkit.py +729 -115
  36. camel/types/enums.py +68 -3
  37. camel/types/unified_model_type.py +5 -0
  38. camel/verifiers/python_verifier.py +93 -9
  39. {camel_ai-0.2.41.dist-info → camel_ai-0.2.43.dist-info}/METADATA +21 -2
  40. {camel_ai-0.2.41.dist-info → camel_ai-0.2.43.dist-info}/RECORD +42 -35
  41. {camel_ai-0.2.41.dist-info → camel_ai-0.2.43.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.41.dist-info → camel_ai-0.2.43.dist-info}/licenses/LICENSE +0 -0
camel/models/ppio_model.py
@@ -0,0 +1,184 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Type, Union
+
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
+
+from camel.configs import PPIO_API_PARAMS, PPIOConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class PPIOModel(BaseModelBackend):
+    r"""Constructor for PPIO backend with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created; supported models can be found here:
+            https://ppinfra.com/model-api/product/llm-api?utm_source=github_owl
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`PPIOConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the PPIO service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL of the PPIO service.
+            If not provided, "https://api.ppinfra.com/v3/openai" will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'PPIO_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = PPIOConfig().as_dict()
+        api_key = api_key or os.environ.get("PPIO_API_KEY")
+        url = url or os.environ.get(
+            "PPIO_API_BASE_URL", "https://api.ppinfra.com/v3/openai"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+
+        self._client = OpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+        self._async_client = AsyncOpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to PPIO API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to PPIO API.
+        """
+        for param in self.model_config_dict:
+            if param not in PPIO_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into PPIO model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.

+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
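
The new backend is a thin OpenAI-compatible client. A minimal usage sketch, assuming `PPIO_API_KEY` is set in the environment and that the public `run()` wrapper on `BaseModelBackend` (reworked in `base_model.py` in this same release) dispatches to `_run()`; the model identifier and config values below are illustrative, and any config key must appear in `PPIO_API_PARAMS` or `check_model_config()` rejects it:

from camel.models.ppio_model import PPIOModel

# Illustrative model id; pick one listed on the PPIO model page.
model = PPIOModel(
    model_type="deepseek/deepseek-r1",
    model_config_dict={"temperature": 0.2, "stream": False},
)

response = model.run(
    messages=[{"role": "user", "content": "Say hello in one sentence."}]
)
print(response.choices[0].message.content)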
camel/models/vllm_model.py
@@ -119,20 +119,34 @@ class VLLMModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to vLLM API.
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
 
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-            unexpected arguments to OpenAI API.
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        for param in self.model_config_dict:
-            if param not in VLLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into vLLM model backend."
-                )
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)
 
     async def _arun(
         self,
@@ -140,69 +154,154 @@ class VLLMModel(BaseModelBackend):
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+        r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
-            response_format (Optional[Type[BaseModel]], optional): The format
-                to return the response in.
-            tools (Optional[List[Dict[str, Any]]], optional): List of tools
-                the model may call.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
 
-        kwargs = self.model_config_dict.copy()
         if tools:
-            kwargs["tools"] = tools
-        if response_format:
-            kwargs["response_format"] = {"type": "json_object"}
+            request_config["tools"] = tools
+
+        # Remove additionalProperties from each tool's function parameters
+        if tools and "tools" in request_config:
+            for tool in request_config["tools"]:
+                if "function" in tool and "parameters" in tool["function"]:
+                    tool["function"]["parameters"].pop(
+                        "additionalProperties", None
+                    )
 
-        response = await self._async_client.chat.completions.create(
+        return self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **kwargs,
+            **request_config,
         )
-        return response
 
-    def _run(
+    async def _arequest_chat_completion(
         self,
         messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
 
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]], optional): The format
-                to return the response in.
-            tools (Optional[List[Dict[str, Any]]], optional): List of tools
-                the model may call.
+        if tools:
+            request_config["tools"] = tools
+            # Remove additionalProperties from each tool's function parameters
+            if "tools" in request_config:
+                for tool in request_config["tools"]:
+                    if "function" in tool and "parameters" in tool["function"]:
+                        tool["function"]["parameters"].pop(
+                            "additionalProperties", None
+                        )
 
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
 
-        kwargs = self.model_config_dict.copy()
-        if tools:
-            kwargs["tools"] = tools
-        if response_format:
-            kwargs["response_format"] = {"type": "json_object"}
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            # Create a deep copy of tools to avoid modifying the original
+            import copy
+
+            request_config["tools"] = copy.deepcopy(tools)
+            # Remove additionalProperties and strict from each tool's function
+            # parameters since vLLM does not support them
+            if "tools" in request_config:
+                for tool in request_config["tools"]:
+                    if "function" in tool and "parameters" in tool["function"]:
+                        tool["function"]["parameters"].pop(
+                            "additionalProperties", None
+                        )
+                    if "strict" in tool.get("function", {}):
+                        tool["function"].pop("strict")
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            # Create a deep copy of tools to avoid modifying the original
+            import copy
 
-        response = self._client.chat.completions.create(
+            request_config["tools"] = copy.deepcopy(tools)
+            # Remove additionalProperties and strict from each tool's function
+            # parameters since vLLM does not support them
+            if "tools" in request_config:
+                for tool in request_config["tools"]:
+                    if "function" in tool and "parameters" in tool["function"]:
+                        tool["function"]["parameters"].pop(
+                            "additionalProperties", None
+                        )
+                    if "strict" in tool.get("function", {}):
+                        tool["function"].pop("strict")
+
+        return await self._async_client.beta.chat.completions.parse(
             messages=messages,
             model=self.model_type,
-            **kwargs,
+            **request_config,
         )
-        return response
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to vLLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to vLLM API.
+        """
+        for param in self.model_config_dict:
+            if param not in VLLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into vLLM model backend."
+                )
 
     @property
     def stream(self) -> bool:
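
The net effect of this refactor is that a Pydantic class passed as `response_format` is now honored via OpenAI's `beta.chat.completions.parse()` rather than being flattened to `{"type": "json_object"}`. A sketch under stated assumptions: a vLLM server reachable at the given URL, an illustrative model name, and the public `run()` wrapper forwarding `response_format` to `_run()`:

from pydantic import BaseModel

from camel.models.vllm_model import VLLMModel


class CityInfo(BaseModel):
    city: str
    country: str


# Both the URL and model name are illustrative.
model = VLLMModel(
    model_type="Qwen/Qwen2.5-7B-Instruct",
    url="http://localhost:8000/v1",
)

# With a Pydantic response_format, _run() dispatches to _request_parse(),
# which also strips additionalProperties/strict from any tool schemas.
completion = model.run(
    messages=[{"role": "user", "content": "Name one city in France."}],
    response_format=CityInfo,
)
print(completion.choices[0].message.parsed)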
camel/societies/workforce/workforce.py
@@ -15,7 +15,6 @@ from __future__ import annotations
 
 import asyncio
 import json
-import logging
 from collections import deque
 from typing import Deque, Dict, List, Optional
 
@@ -23,6 +22,7 @@ from colorama import Fore
 
 from camel.agents import ChatAgent
 from camel.configs import ChatGPTConfig
+from camel.logger import get_logger
 from camel.messages.base import BaseMessage
 from camel.models import ModelFactory
 from camel.societies.workforce.base import BaseNode
@@ -44,7 +44,7 @@ from camel.tasks.task import Task, TaskState
 from camel.toolkits import GoogleMapsToolkit, SearchToolkit, WeatherToolkit
 from camel.types import ModelPlatformType, ModelType
 
-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)
 
 
 class Workforce(BaseNode):
@@ -60,13 +60,16 @@ class Workforce(BaseNode):
             another workforce node. (default: :obj:`None`)
         coordinator_agent_kwargs (Optional[Dict], optional): Keyword
             arguments for the coordinator agent, e.g. `model`, `api_key`,
-            `tools`, etc. (default: :obj:`None`)
+            `tools`, etc. If not provided, default model settings will be used.
+            (default: :obj:`None`)
         task_agent_kwargs (Optional[Dict], optional): Keyword arguments for
             the task agent, e.g. `model`, `api_key`, `tools`, etc.
+            If not provided, default model settings will be used.
             (default: :obj:`None`)
         new_worker_agent_kwargs (Optional[Dict]): Default keyword arguments
             for the worker agent that will be created during runtime to
             handle failed tasks, e.g. `model`, `api_key`, `tools`, etc.
+            If not provided, default model settings will be used.
             (default: :obj:`None`)
     """
 
@@ -83,6 +86,26 @@ class Workforce(BaseNode):
         self._children = children or []
         self.new_worker_agent_kwargs = new_worker_agent_kwargs
 
+        # Warning messages for default model usage
+        if coordinator_agent_kwargs is None:
+            logger.warning(
+                "No coordinator_agent_kwargs provided. "
+                "Using `ModelPlatformType.DEFAULT` and `ModelType.DEFAULT` "
+                "for coordinator agent."
+            )
+        if task_agent_kwargs is None:
+            logger.warning(
+                "No task_agent_kwargs provided. "
+                "Using `ModelPlatformType.DEFAULT` and `ModelType.DEFAULT` "
+                "for task agent."
+            )
+        if new_worker_agent_kwargs is None:
+            logger.warning(
+                "No new_worker_agent_kwargs provided. "
+                "Using `ModelPlatformType.DEFAULT` and `ModelType.DEFAULT` "
+                "for worker agents created during runtime."
+            )
+
         coord_agent_sys_msg = BaseMessage.make_assistant_message(
             role_name="Workforce Manager",
             content="You are coordinating a group of workers. A worker can be "
camel/toolkits/__init__.py
@@ -35,6 +35,7 @@ from .google_maps_toolkit import GoogleMapsToolkit
 from .code_execution import CodeExecutionToolkit
 from .github_toolkit import GithubToolkit
 from .google_scholar_toolkit import GoogleScholarToolkit
+from .google_calendar_toolkit import GoogleCalendarToolkit
 from .arxiv_toolkit import ArxivToolkit
 from .slack_toolkit import SlackToolkit
 from .whatsapp_toolkit import WhatsAppToolkit
@@ -91,6 +92,7 @@ __all__ = [
     'AskNewsToolkit',
     'AsyncAskNewsToolkit',
    'GoogleScholarToolkit',
+    'GoogleCalendarToolkit',
     'NotionToolkit',
     'ArxivToolkit',
     'HumanToolkit',
camel/toolkits/browser_toolkit.py
@@ -20,6 +20,7 @@ import random
 import re
 import shutil
 import time
+import urllib.parse
 from copy import deepcopy
 from typing import (
     TYPE_CHECKING,
@@ -546,8 +547,10 @@ class BaseBrowser:
         file_path = None
         if save_image:
             # Get url name to form a file name
-            # TODO: Use a safer way for the url name
-            url_name = self.page_url.split("/")[-1]
+            # Use urlparse for a safer extraction of the url name
+            parsed_url = urllib.parse.urlparse(self.page_url)
+            url_name = os.path.basename(str(parsed_url.path)) or "index"
+
             for char in ['\\', '/', ':', '*', '?', '"', '<', '>', '|', '.']:
                 url_name = url_name.replace(char, "_")
 
@@ -673,7 +676,8 @@ class BaseBrowser:
             rects,  # type: ignore[arg-type]
         )
         if save_image:
-            url_name = self.page_url.split("/")[-1]
+            parsed_url = urllib.parse.urlparse(self.page_url)
+            url_name = os.path.basename(str(parsed_url.path)) or "index"
             for char in ['\\', '/', ':', '*', '?', '"', '<', '>', '|', '.']:
                 url_name = url_name.replace(char, "_")
             timestamp = datetime.datetime.now().strftime("%m%d%H%M%S")
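
For reference, the new filename derivation in isolation; a standalone sketch using only the standard library, with `page_url` standing in for `BaseBrowser.page_url`:

import os
import urllib.parse


def url_to_file_name(page_url: str) -> str:
    # Last path segment of the URL; fall back to "index" for bare hosts
    # or trailing-slash URLs where the basename is empty.
    parsed_url = urllib.parse.urlparse(page_url)
    url_name = os.path.basename(parsed_url.path) or "index"
    # Replace characters that are unsafe in file names on common platforms.
    for char in ['\\', '/', ':', '*', '?', '"', '<', '>', '|', '.']:
        url_name = url_name.replace(char, "_")
    return url_name


print(url_to_file_name("https://example.com/docs/page.html?q=1"))  # page_html
print(url_to_file_name("https://example.com/"))  # index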