camel-ai 0.2.29__py3-none-any.whl → 0.2.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (47)
  1. camel/__init__.py +1 -1
  2. camel/agents/_utils.py +1 -1
  3. camel/benchmarks/apibank.py +8 -2
  4. camel/benchmarks/apibench.py +4 -1
  5. camel/benchmarks/gaia.py +6 -2
  6. camel/benchmarks/nexus.py +4 -1
  7. camel/data_collector/sharegpt_collector.py +16 -5
  8. camel/datahubs/huggingface.py +3 -2
  9. camel/datasets/__init__.py +7 -5
  10. camel/datasets/base_generator.py +335 -0
  11. camel/datasets/models.py +61 -0
  12. camel/datasets/static_dataset.py +346 -0
  13. camel/embeddings/openai_compatible_embedding.py +4 -4
  14. camel/environments/__init__.py +11 -2
  15. camel/environments/models.py +111 -0
  16. camel/environments/multi_step.py +271 -0
  17. camel/environments/single_step.py +293 -0
  18. camel/loaders/base_io.py +1 -1
  19. camel/loaders/chunkr_reader.py +1 -1
  20. camel/logger.py +56 -0
  21. camel/messages/conversion/conversation_models.py +2 -2
  22. camel/messages/func_message.py +1 -1
  23. camel/models/cohere_model.py +3 -1
  24. camel/models/openai_compatible_model.py +4 -2
  25. camel/models/samba_model.py +4 -2
  26. camel/personas/persona.py +1 -0
  27. camel/runtime/api.py +6 -2
  28. camel/runtime/docker_runtime.py +1 -1
  29. camel/runtime/remote_http_runtime.py +1 -1
  30. camel/storages/key_value_storages/json.py +5 -1
  31. camel/storages/key_value_storages/redis.py +1 -1
  32. camel/toolkits/browser_toolkit.py +59 -1
  33. camel/toolkits/file_write_toolkit.py +2 -2
  34. camel/toolkits/linkedin_toolkit.py +3 -1
  35. camel/toolkits/networkx_toolkit.py +2 -2
  36. camel/toolkits/search_toolkit.py +183 -1
  37. camel/toolkits/semantic_scholar_toolkit.py +2 -2
  38. camel/toolkits/stripe_toolkit.py +17 -8
  39. camel/toolkits/sympy_toolkit.py +54 -27
  40. camel/types/enums.py +3 -0
  41. camel/utils/commons.py +1 -1
  42. {camel_ai-0.2.29.dist-info → camel_ai-0.2.31.dist-info}/METADATA +2 -1
  43. {camel_ai-0.2.29.dist-info → camel_ai-0.2.31.dist-info}/RECORD +45 -41
  44. camel/datasets/base.py +0 -639
  45. camel/environments/base.py +0 -509
  46. {camel_ai-0.2.29.dist-info → camel_ai-0.2.31.dist-info}/WHEEL +0 -0
  47. {camel_ai-0.2.29.dist-info → camel_ai-0.2.31.dist-info}/licenses/LICENSE +0 -0
@@ -133,7 +133,7 @@ class FunctionCallingMessage(BaseMessage):
133
133
  "type": "function",
134
134
  "function": {
135
135
  "name": self.func_name,
136
- "arguments": json.dumps(self.args),
136
+ "arguments": json.dumps(self.args, ensure_ascii=False),
137
137
  },
138
138
  }
139
139
  ],
@@ -176,7 +176,9 @@ class CohereModel(BaseModelBackend):
176
176
  else:
177
177
  arguments = function_call.get("arguments") # type: ignore[attr-defined]
178
178
  arguments_dict = ast.literal_eval(arguments)
179
- arguments_json = json.dumps(arguments_dict)
179
+ arguments_json = json.dumps(
180
+ arguments_dict, ensure_ascii=False
181
+ )
180
182
 
181
183
  assis_tool_call_id = str(uuid.uuid4())
182
184
  tool_call_id = assis_tool_call_id
@@ -56,8 +56,10 @@ class OpenAICompatibleModel(BaseModelBackend):
56
56
  url: Optional[str] = None,
57
57
  token_counter: Optional[BaseTokenCounter] = None,
58
58
  ) -> None:
59
- self.api_key = api_key or os.environ.get("OPENAI_COMPATIBILIY_API_KEY")
60
- self.url = url or os.environ.get("OPENAI_COMPATIBILIY_API_BASE_URL")
59
+ self.api_key = api_key or os.environ.get(
60
+ "OPENAI_COMPATIBILITY_API_KEY"
61
+ )
62
+ self.url = url or os.environ.get("OPENAI_COMPATIBILITY_API_BASE_URL")
61
63
  super().__init__(
62
64
  model_type, model_config_dict, api_key, url, token_counter
63
65
  )
@@ -307,7 +307,8 @@ class SambaModel(BaseModelBackend):
307
307
  {
308
308
  "conversation_id": str(uuid.uuid4()),
309
309
  "messages": messages,
310
- }
310
+ },
311
+ ensure_ascii=False,
311
312
  ),
312
313
  "params": {
313
314
  "do_sample": {"type": "bool", "value": "true"},
@@ -537,7 +538,8 @@ class SambaModel(BaseModelBackend):
537
538
  {
538
539
  "conversation_id": str(uuid.uuid4()),
539
540
  "messages": messages,
540
- }
541
+ },
542
+ ensure_ascii=False,
541
543
  ),
542
544
  "params": {
543
545
  "do_sample": {"type": "bool", "value": "true"},
camel/personas/persona.py CHANGED
@@ -100,4 +100,5 @@ class Persona(BaseModel):
100
100
  ",",
101
101
  ": ",
102
102
  ), # Fine-tune separators for better readability
103
+ ensure_ascii=False,
103
104
  )
camel/runtime/api.py CHANGED
@@ -80,10 +80,14 @@ for module_function in modules_functions:
80
80
  output = sys.stdout.read()
81
81
  sys.stdout = sys.__stdout__
82
82
  return {
83
- "output": json.dumps(response_data),
83
+ "output": json.dumps(
84
+ response_data, ensure_ascii=False
85
+ ),
84
86
  "stdout": output,
85
87
  }
86
- return {"output": json.dumps(response_data)}
88
+ return {
89
+ "output": json.dumps(response_data, ensure_ascii=False)
90
+ }
87
91
 
88
92
  except (ImportError, AttributeError) as e:
89
93
  logger.error(f"Error importing {module_function}: {e}")
@@ -271,7 +271,7 @@ class DockerRuntime(BaseRuntime):
271
271
  funcs = [funcs]
272
272
 
273
273
  if arguments is not None:
274
- entrypoint += json.dumps(arguments)
274
+ entrypoint += json.dumps(arguments, ensure_ascii=False)
275
275
 
276
276
  for func in funcs:
277
277
  inner_func = func.func
@@ -100,7 +100,7 @@ class RemoteHttpRuntime(BaseRuntime):
100
100
  if not isinstance(funcs, list):
101
101
  funcs = [funcs]
102
102
  if arguments is not None:
103
- entrypoint += json.dumps(arguments)
103
+ entrypoint += json.dumps(arguments, ensure_ascii=False)
104
104
 
105
105
  for func in funcs:
106
106
  inner_func = func.func
@@ -75,7 +75,11 @@ class JsonStorage(BaseKeyValueStorage):
75
75
  """
76
76
  with self.json_path.open("a") as f:
77
77
  f.writelines(
78
- [json.dumps(r, cls=_CamelJSONEncoder) + "\n" for r in records]
78
+ [
79
+ json.dumps(r, cls=_CamelJSONEncoder, ensure_ascii=False)
80
+ + "\n"
81
+ for r in records
82
+ ]
79
83
  )
80
84
 
81
85
  def load(self) -> List[Dict[str, Any]]:
@@ -133,7 +133,7 @@ class RedisStorage(BaseKeyValueStorage):
133
133
  if self._client is None:
134
134
  raise ValueError("Redis client is not initialized")
135
135
  try:
136
- value = json.dumps(records)
136
+ value = json.dumps(records, ensure_ascii=False)
137
137
  if expire:
138
138
  await self._client.setex(self._sid, expire, value)
139
139
  else:
@@ -1049,7 +1049,7 @@ check the history actions to avoid the same mistakes.
1049
1049
  - `action_code`: The action code you want to take. It is only one step action
1050
1050
  code, without any other texts (such as annotation)
1051
1051
 
1052
- Here are an example of the output:
1052
+ Here are two examples of the output:
1053
1053
  ```json
1054
1054
  {{
1055
1055
  "observation": [IMAGE_DESCRIPTION],
@@ -1057,6 +1057,12 @@ Here are an example of the output:
1057
1057
  "action_code": "fill_input_id([ID], [TEXT])"
1058
1058
  }}
1059
1059
 
1060
+ {{
1061
+ "observation": "The current page is a CAPTCHA verification page on Amazon. It asks the user to ..",
1062
+ "reasoning": "To proceed with the task of searching for products, I need to complete..",
1063
+ "action_code": "fill_input_id(3, 'AUXPMR')"
1064
+ }}
1065
+
1060
1066
  Here are some tips for you:
1061
1067
  - Never forget the overall question: **{task_prompt}**
1062
1068
  - Maybe after a certain operation (e.g. click_id), the page content has not
@@ -1150,6 +1156,58 @@ out the information you need. Sometimes they are extremely useful.
1150
1156
 
1151
1157
  return False
1152
1158
 
1159
+ def _fix_action_code(action_code: str) -> str:
1160
+ r"""Fix potential missing quotes in action code"""
1161
+
1162
+ match = re.match(r'(\w+)\((.*)\)', action_code)
1163
+ if not match:
1164
+ return action_code
1165
+
1166
+ func_name, args_str = match.groups()
1167
+
1168
+ args = []
1169
+ current_arg = ""
1170
+ in_quotes = False
1171
+ quote_char = None
1172
+
1173
+ for char in args_str:
1174
+ if char in ['"', "'"]:
1175
+ if not in_quotes:
1176
+ in_quotes = True
1177
+ quote_char = char
1178
+ current_arg += char
1179
+ elif char == quote_char:
1180
+ in_quotes = False
1181
+ quote_char = None
1182
+ current_arg += char
1183
+ else:
1184
+ current_arg += char
1185
+ elif char == ',' and not in_quotes:
1186
+ args.append(current_arg.strip())
1187
+ current_arg = ""
1188
+ else:
1189
+ current_arg += char
1190
+
1191
+ if current_arg:
1192
+ args.append(current_arg.strip())
1193
+
1194
+ fixed_args = []
1195
+ for arg in args:
1196
+ if (
1197
+ (arg.startswith('"') and arg.endswith('"'))
1198
+ or (arg.startswith("'") and arg.endswith("'"))
1199
+ or re.match(r'^-?\d+(\.\d+)?$', arg)
1200
+ or re.match(r'^-?\d+\.?\d*[eE][-+]?\d+$', arg)
1201
+ or re.match(r'^0[xX][0-9a-fA-F]+$', arg)
1202
+ ):
1203
+ fixed_args.append(arg)
1204
+
1205
+ else:
1206
+ fixed_args.append(f"'{arg}'")
1207
+
1208
+ return f"{func_name}({', '.join(fixed_args)})"
1209
+
1210
+ action_code = _fix_action_code(action_code)
1153
1211
  prefix = "self.browser."
1154
1212
  code = f"{prefix}{action_code}"
1155
1213
 
@@ -219,13 +219,13 @@ class FileWriteToolkit(BaseToolkit):
219
219
  try:
220
220
  # Try parsing as JSON string first
221
221
  data = json.loads(content)
222
- json.dump(data, f)
222
+ json.dump(data, f, ensure_ascii=False)
223
223
  except json.JSONDecodeError:
224
224
  # If not valid JSON string, write as is
225
225
  f.write(content)
226
226
  else:
227
227
  # If not string, dump as JSON
228
- json.dump(content, f)
228
+ json.dump(content, f, ensure_ascii=False)
229
229
  logger.debug(f"Wrote JSON to {file_path} with {encoding} encoding")
230
230
 
231
231
  def _write_yaml_file(
@@ -75,7 +75,9 @@ class LinkedInToolkit(BaseToolkit):
75
75
  }
76
76
 
77
77
  response = requests.post(
78
- url, headers=headers, data=json.dumps(post_data)
78
+ url,
79
+ headers=headers,
80
+ data=json.dumps(post_data, ensure_ascii=False),
79
81
  )
80
82
  if response.status_code == 201:
81
83
  post_response = response.json()
@@ -178,7 +178,7 @@ class NetworkXToolkit(BaseToolkit):
178
178
  """
179
179
  logger.info("Serializing the graph.")
180
180
  nx = self._get_nx()
181
- return json.dumps(nx.node_link_data(self.graph))
181
+ return json.dumps(nx.node_link_data(self.graph), ensure_ascii=False)
182
182
 
183
183
  def deserialize_graph(self, data: str) -> None:
184
184
  r"""Loads a graph from a serialized JSON string.
@@ -199,7 +199,7 @@ class NetworkXToolkit(BaseToolkit):
199
199
  logger.info(f"Exporting graph to file: {file_path}")
200
200
  nx = self._get_nx()
201
201
  with open(file_path, "w") as file:
202
- json.dump(nx.node_link_data(self.graph), file)
202
+ json.dump(nx.node_link_data(self.graph), file, ensure_ascii=False)
203
203
 
204
204
  def import_from_file(self, file_path: str) -> None:
205
205
  r"""Imports a graph from a JSON file.
@@ -751,7 +751,8 @@ class SearchToolkit(BaseToolkit):
751
751
  "summary": summary,
752
752
  "count": count,
753
753
  "page": page,
754
- }
754
+ },
755
+ ensure_ascii=False,
755
756
  )
756
757
  try:
757
758
  response = requests.post(url, headers=headers, data=payload)
@@ -766,6 +767,185 @@ class SearchToolkit(BaseToolkit):
766
767
  except requests.exceptions.RequestException as e:
767
768
  return {"error": f"Bocha AI search failed: {e!s}"}
768
769
 
770
+ def search_baidu(self, query: str, max_results: int = 5) -> Dict[str, Any]:
771
+ r"""Search Baidu using web scraping to retrieve relevant search
772
+ results. This method queries Baidu's search engine and extracts search
773
+ results including titles, descriptions, and URLs.
774
+
775
+ Args:
776
+ query (str): Search query string to submit to Baidu.
777
+ max_results (int): Maximum number of results to return.
778
+ (default: :obj:`5`)
779
+
780
+ Returns:
781
+ Dict[str, Any]: A dictionary containing search results or error
782
+ message.
783
+ """
784
+ from bs4 import BeautifulSoup
785
+
786
+ try:
787
+ url = "https://www.baidu.com/s"
788
+ headers = {
789
+ "User-Agent": (
790
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
791
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
792
+ "Chrome/120.0.0.0 Safari/537.36"
793
+ ),
794
+ "Referer": "https://www.baidu.com",
795
+ }
796
+ params = {"wd": query, "rn": str(max_results)}
797
+
798
+ response = requests.get(url, headers=headers, params=params)
799
+ response.encoding = "utf-8"
800
+
801
+ soup = BeautifulSoup(response.text, "html.parser")
802
+
803
+ results = []
804
+ for idx, item in enumerate(soup.select(".result"), 1):
805
+ title_element = item.select_one("h3 > a")
806
+ title = (
807
+ title_element.get_text(strip=True) if title_element else ""
808
+ )
809
+
810
+ link = title_element["href"] if title_element else ""
811
+
812
+ desc_element = item.select_one(".c-abstract, .c-span-last")
813
+ desc = (
814
+ desc_element.get_text(strip=True) if desc_element else ""
815
+ )
816
+
817
+ results.append(
818
+ {
819
+ "result_id": idx,
820
+ "title": title,
821
+ "description": desc,
822
+ "url": link,
823
+ }
824
+ )
825
+ if len(results) >= max_results:
826
+ break
827
+
828
+ if not results:
829
+ print(
830
+ "Warning: No results found. Check "
831
+ "if Baidu HTML structure has changed."
832
+ )
833
+
834
+ return {"results": results}
835
+
836
+ except Exception as e:
837
+ return {"error": f"Baidu scraping error: {e!s}"}
838
+
839
+ def search_bing(self, query: str, max_results: int = 5) -> Dict[str, Any]:
840
+ r"""Use Bing search engine to search information for the given query.
841
+
842
+ This function queries the Chinese version of Bing search engine (cn.
843
+ bing.com) using web scraping to retrieve relevant search results. It
844
+ extracts search results including titles, snippets, and URLs. This
845
+ function is particularly useful when the query is in Chinese or when
846
+ Chinese search results are desired.
847
+
848
+ Args:
849
+ query (str): The search query string to submit to Bing. Works best
850
+ with Chinese queries or when Chinese results are preferred.
851
+ max_results (int): Maximum number of results to return.
852
+ (default: :obj:`5`)
853
+
854
+ Returns:
855
+ Dict[str, Any]: A dictionary containing either:
856
+ - 'results': A list of dictionaries, each with:
857
+ - 'result_id': The index of the result.
858
+ - 'snippet': A brief description of the search result.
859
+ - 'title': The title of the search result.
860
+ - 'link': The URL of the search result.
861
+ - or 'error': An error message if something went wrong.
862
+ """
863
+ from typing import Any, Dict, List, cast
864
+ from urllib.parse import urlencode
865
+
866
+ from bs4 import BeautifulSoup, Tag
867
+
868
+ try:
869
+ query = urlencode({"q": query})
870
+ url = f'https://cn.bing.com/search?{query}'
871
+ headers = {
872
+ "User-Agent": (
873
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
874
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
875
+ "Chrome/120.0.0.0 Safari/537.36"
876
+ ),
877
+ }
878
+ # Add timeout to prevent hanging
879
+ response = requests.get(url, headers=headers, timeout=10)
880
+
881
+ # Check if the request was successful
882
+ if response.status_code != 200:
883
+ return {
884
+ "error": (
885
+ f"Bing returned status code: "
886
+ f"{response.status_code}"
887
+ )
888
+ }
889
+
890
+ response.encoding = 'utf-8'
891
+ soup = BeautifulSoup(response.text, 'html.parser')
892
+
893
+ b_results_element = soup.find("ol", id="b_results")
894
+ if b_results_element is None:
895
+ return {"results": []}
896
+
897
+ # Ensure b_results is a Tag and find all li elements
898
+ b_results_tag = cast(Tag, b_results_element)
899
+ result_items = b_results_tag.find_all("li")
900
+
901
+ results: List[Dict[str, Any]] = []
902
+ for i in range(min(len(result_items), max_results)):
903
+ row = result_items[i]
904
+ if not isinstance(row, Tag):
905
+ continue
906
+
907
+ h2_element = row.find("h2")
908
+ if h2_element is None:
909
+ continue
910
+ h2_tag = cast(Tag, h2_element)
911
+
912
+ title = h2_tag.get_text().strip()
913
+
914
+ link_tag_element = h2_tag.find("a")
915
+ if link_tag_element is None:
916
+ continue
917
+ link_tag = cast(Tag, link_tag_element)
918
+
919
+ link = link_tag.get("href")
920
+ if link is None:
921
+ continue
922
+
923
+ content_element = row.find("p", class_="b_algoSlug")
924
+ content_text = ""
925
+ if content_element is not None and isinstance(
926
+ content_element, Tag
927
+ ):
928
+ content_text = content_element.get_text()
929
+
930
+ row_data = {
931
+ "result_id": i + 1,
932
+ "snippet": content_text,
933
+ "title": title,
934
+ "link": link,
935
+ }
936
+ results.append(row_data)
937
+
938
+ if not results:
939
+ return {
940
+ "warning": "No results found. Check if "
941
+ "Bing HTML structure has changed."
942
+ }
943
+
944
+ return {"results": results}
945
+
946
+ except Exception as e:
947
+ return {"error": f"Bing scraping error: {e!s}"}
948
+
769
949
  def get_tools(self) -> List[FunctionTool]:
770
950
  r"""Returns a list of FunctionTool objects representing the
771
951
  functions in the toolkit.
@@ -783,4 +963,6 @@ class SearchToolkit(BaseToolkit):
783
963
  FunctionTool(self.tavily_search),
784
964
  FunctionTool(self.search_brave),
785
965
  FunctionTool(self.search_bocha),
966
+ FunctionTool(self.search_baidu),
967
+ FunctionTool(self.search_bing),
786
968
  ]
@@ -238,7 +238,7 @@ class SemanticScholarToolkit(BaseToolkit):
238
238
  papers = response.json()
239
239
  if save_to_file:
240
240
  with open('recommended_papers.json', 'w') as output:
241
- json.dump(papers, output)
241
+ json.dump(papers, output, ensure_ascii=False)
242
242
  return papers
243
243
  except requests.exceptions.RequestException as e:
244
244
  return {"error": str(e)}
@@ -282,7 +282,7 @@ class SemanticScholarToolkit(BaseToolkit):
282
282
  response_data = response.json()
283
283
  if save_to_file:
284
284
  with open('author_information.json', 'w') as output:
285
- json.dump(response_data, output)
285
+ json.dump(response_data, output, ensure_ascii=False)
286
286
  return response_data
287
287
  except requests.exceptions.RequestException as e:
288
288
  return {"error": str(e)}
@@ -85,7 +85,7 @@ class StripeToolkit(BaseToolkit):
85
85
  self.logger.info(f"Retrieving customer with ID: {customer_id}")
86
86
  customer = stripe.Customer.retrieve(customer_id)
87
87
  self.logger.info(f"Retrieved customer: {customer.id}")
88
- json_string = json.dumps(customer)
88
+ json_string = json.dumps(customer, ensure_ascii=False)
89
89
  return json_string
90
90
  except Exception as e:
91
91
  return self.handle_exception("customer_get", e)
@@ -109,7 +109,9 @@ class StripeToolkit(BaseToolkit):
109
109
  self.logger.info(
110
110
  f"Successfully retrieved {len(customers)} customers."
111
111
  )
112
- return json.dumps([customer for customer in customers])
112
+ return json.dumps(
113
+ [customer for customer in customers], ensure_ascii=False
114
+ )
113
115
  except Exception as e:
114
116
  return self.handle_exception("customer_list", e)
115
117
 
@@ -128,7 +130,7 @@ class StripeToolkit(BaseToolkit):
128
130
  self.logger.info(
129
131
  f"Successfully retrieved account balance: {balance}."
130
132
  )
131
- return json.dumps(balance)
133
+ return json.dumps(balance, ensure_ascii=False)
132
134
  except Exception as e:
133
135
  return self.handle_exception("balance_get", e)
134
136
 
@@ -154,7 +156,10 @@ class StripeToolkit(BaseToolkit):
154
156
  f"Successfully retrieved {len(transactions)} "
155
157
  "balance transactions."
156
158
  )
157
- return json.dumps([transaction for transaction in transactions])
159
+ return json.dumps(
160
+ [transaction for transaction in transactions],
161
+ ensure_ascii=False,
162
+ )
158
163
  except Exception as e:
159
164
  return self.handle_exception("balance_transaction_list", e)
160
165
 
@@ -174,7 +179,7 @@ class StripeToolkit(BaseToolkit):
174
179
  self.logger.info(f"Retrieving payment with ID: {payment_id}")
175
180
  payment = stripe.PaymentIntent.retrieve(payment_id)
176
181
  self.logger.info(f"Retrieved payment: {payment.id}")
177
- return json.dumps(payment)
182
+ return json.dumps(payment, ensure_ascii=False)
178
183
  except Exception as e:
179
184
  return self.handle_exception("payment_get", e)
180
185
 
@@ -197,7 +202,9 @@ class StripeToolkit(BaseToolkit):
197
202
  self.logger.info(
198
203
  f"Successfully retrieved {len(payments)} payments."
199
204
  )
200
- return json.dumps([payment for payment in payments])
205
+ return json.dumps(
206
+ [payment for payment in payments], ensure_ascii=False
207
+ )
201
208
  except Exception as e:
202
209
  return self.handle_exception("payment_list", e)
203
210
 
@@ -217,7 +224,7 @@ class StripeToolkit(BaseToolkit):
217
224
  self.logger.info(f"Retrieving refund with ID: {refund_id}")
218
225
  refund = stripe.Refund.retrieve(refund_id)
219
226
  self.logger.info(f"Retrieved refund: {refund.id}")
220
- return json.dumps(refund)
227
+ return json.dumps(refund, ensure_ascii=False)
221
228
  except Exception as e:
222
229
  return self.handle_exception("refund_get", e)
223
230
 
@@ -238,7 +245,9 @@ class StripeToolkit(BaseToolkit):
238
245
  self.logger.info(f"Listing refunds with limit={limit}")
239
246
  refunds = stripe.Refund.list(limit=limit).data
240
247
  self.logger.info(f"Successfully retrieved {len(refunds)} refunds.")
241
- return json.dumps([refund for refund in refunds])
248
+ return json.dumps(
249
+ [refund for refund in refunds], ensure_ascii=False
250
+ )
242
251
  except Exception as e:
243
252
  return self.handle_exception("refund_list", e)
244
253