camel-ai 0.2.15a0__py3-none-any.whl → 0.2.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (61)
  1. camel/__init__.py +1 -1
  2. camel/benchmarks/__init__.py +11 -1
  3. camel/benchmarks/apibank.py +560 -0
  4. camel/benchmarks/apibench.py +496 -0
  5. camel/benchmarks/gaia.py +2 -2
  6. camel/benchmarks/nexus.py +518 -0
  7. camel/datagen/__init__.py +6 -2
  8. camel/datagen/{o1datagen.py → cotdatagen.py} +19 -6
  9. camel/datagen/self_instruct/__init__.py +36 -0
  10. camel/datagen/self_instruct/filter/__init__.py +34 -0
  11. camel/datagen/self_instruct/filter/filter_function.py +216 -0
  12. camel/datagen/self_instruct/filter/filter_registry.py +56 -0
  13. camel/datagen/self_instruct/filter/instruction_filter.py +81 -0
  14. camel/datagen/self_instruct/self_instruct.py +393 -0
  15. camel/datagen/self_instruct/templates.py +384 -0
  16. camel/datahubs/huggingface.py +12 -2
  17. camel/datahubs/models.py +2 -3
  18. camel/embeddings/mistral_embedding.py +5 -1
  19. camel/embeddings/openai_compatible_embedding.py +6 -1
  20. camel/embeddings/openai_embedding.py +5 -1
  21. camel/interpreters/e2b_interpreter.py +5 -1
  22. camel/loaders/apify_reader.py +5 -1
  23. camel/loaders/chunkr_reader.py +5 -1
  24. camel/loaders/firecrawl_reader.py +0 -30
  25. camel/logger.py +11 -5
  26. camel/models/anthropic_model.py +5 -1
  27. camel/models/azure_openai_model.py +1 -2
  28. camel/models/cohere_model.py +5 -1
  29. camel/models/deepseek_model.py +5 -1
  30. camel/models/gemini_model.py +5 -1
  31. camel/models/groq_model.py +5 -1
  32. camel/models/mistral_model.py +5 -1
  33. camel/models/nemotron_model.py +5 -1
  34. camel/models/nvidia_model.py +5 -1
  35. camel/models/openai_model.py +5 -1
  36. camel/models/qwen_model.py +5 -1
  37. camel/models/reka_model.py +5 -1
  38. camel/models/reward/nemotron_model.py +5 -1
  39. camel/models/samba_model.py +5 -1
  40. camel/models/togetherai_model.py +5 -1
  41. camel/models/yi_model.py +5 -1
  42. camel/models/zhipuai_model.py +5 -1
  43. camel/schemas/openai_converter.py +5 -1
  44. camel/storages/graph_storages/nebula_graph.py +89 -20
  45. camel/storages/graph_storages/neo4j_graph.py +138 -0
  46. camel/toolkits/__init__.py +4 -0
  47. camel/toolkits/arxiv_toolkit.py +20 -3
  48. camel/toolkits/dappier_toolkit.py +196 -0
  49. camel/toolkits/function_tool.py +61 -61
  50. camel/toolkits/meshy_toolkit.py +5 -1
  51. camel/toolkits/notion_toolkit.py +1 -1
  52. camel/toolkits/openbb_toolkit.py +869 -0
  53. camel/toolkits/search_toolkit.py +91 -5
  54. camel/toolkits/stripe_toolkit.py +5 -1
  55. camel/toolkits/twitter_toolkit.py +24 -16
  56. camel/utils/__init__.py +2 -0
  57. camel/utils/commons.py +104 -19
  58. {camel_ai-0.2.15a0.dist-info → camel_ai-0.2.16.dist-info}/METADATA +16 -4
  59. {camel_ai-0.2.15a0.dist-info → camel_ai-0.2.16.dist-info}/RECORD +61 -49
  60. {camel_ai-0.2.15a0.dist-info → camel_ai-0.2.16.dist-info}/LICENSE +0 -0
  61. {camel_ai-0.2.15a0.dist-info → camel_ai-0.2.16.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.15a0'
+__version__ = '0.2.16'
 
 __all__ = [
     '__version__',
camel/benchmarks/__init__.py CHANGED
@@ -12,7 +12,17 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
+from .apibank import APIBankBenchmark
+from .apibench import APIBenchBenchmark
 from .base import BaseBenchmark
 from .gaia import DefaultGAIARetriever, GAIABenchmark
+from .nexus import NexusBenchmark
 
-__all__ = ["BaseBenchmark", "GAIABenchmark", "DefaultGAIARetriever"]
+__all__ = [
+    "BaseBenchmark",
+    "GAIABenchmark",
+    "DefaultGAIARetriever",
+    "NexusBenchmark",
+    "APIBenchBenchmark",
+    "APIBankBenchmark",
+]
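With these exports in place, all four benchmarks become importable directly from camel.benchmarks. A minimal import sketch, assuming nothing beyond the names exported above:

from camel.benchmarks import (
    APIBankBenchmark,
    APIBenchBenchmark,
    GAIABenchmark,
    NexusBenchmark,
)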
camel/benchmarks/apibank.py ADDED
@@ -0,0 +1,560 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+import logging
+import os
+import random
+import re
+import sys
+from pathlib import Path
+from typing import Any, Dict, List, Literal, Optional
+
+import numpy as np
+from rouge import Rouge
+from tqdm import tqdm
+
+from camel.agents import ChatAgent
+from camel.benchmarks.base import BaseBenchmark
+from camel.messages import BaseMessage
+from camel.utils import download_github_subdirectory
+
+logger = logging.getLogger(__name__)
+
+
+def process_messages(
+    chat_history: List[Dict[str, Any]],
+    prompt: str,
+) -> List[Dict[str, str]]:
+    """
+    Processes chat history into a structured format for further use.
+
+    Args:
+        chat_history (List[Dict[str, Any]]):
+            A list of dictionaries representing the chat history.
+        prompt (str): A prompt to be set as the system message.
+
+    Returns:
+        List[Dict[str, str]]: A list of dictionaries representing
+        the processed messages, where each dictionary has:
+        - 'role': The role of the message ('system', 'user', or 'assistant').
+        - 'content': The content of the message, including formatted
+          API responses when applicable.
+    """
+    messages = [{'role': 'system', 'content': prompt}]
+    for item in chat_history:
+        role_map = {'User': 'user', 'AI': 'assistant', 'API': 'system'}
+        chat_role = role_map.get(
+            item['role'], 'unknown'
+        )  # default role to 'unknown'
+        if item['role'] == 'API':
+            chat_content = '[{}({})] Response: {}'.format(
+                item['api_name'],
+                ', '.join(
+                    [
+                        '{}=\'{}\''.format(k, v)
+                        for k, v in item['param_dict'].items()
+                    ]
+                ),
+                str(item['result']['output']),
+            )
+        else:
+            chat_content = item['text']
+        messages.append({'role': chat_role, 'content': chat_content})
+    return messages
+
+
+class APIBankBenchmark(BaseBenchmark):
+    r"""API-Bank Benchmark adapted from `API-Bank:
+    A Comprehensive Benchmark for Tool-Augmented LLMs`
+    <https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/api-bank>.
+
+    Args:
+        save_to (str): The file to save the results.
+        processes (int, optional): The number of processes to use.
+            (default: :obj:`1`)
+    """
+
+    def __init__(
+        self,
+        save_to: str,
+        processes: int = 1,
+    ):
+        r"""Initialize the APIBank benchmark.
+
+        Args:
+            save_to (str): The file to save the results.
+            processes (int, optional): The number of processes to use for
+                parallel processing. (default: :obj:`1`)
+        """
+        # Predefine data_dir for better import management
+        super().__init__("apibank", "api_bank", save_to, processes)
+        self._data: Dict[str, List[APIBankSample]] = dict()  # type: ignore[assignment]
+
+    def download(self):
+        r"""Download the APIBank dataset and code from GitHub."""
+
+        repo = "AlibabaResearch/DAMO-ConvAI"
+        subdir = "api-bank"
+        data_dir = self.data_dir
+
+        download_github_subdirectory(repo, subdir, data_dir)
+
+        sys.path.insert(0, self.data_dir)
+        logger.info("Download completed.")
+
+    def load(self, level: str, force_download: bool = False):  # type: ignore[override]
+        r"""Load the APIBank Benchmark dataset.
+
+        Args:
+            level (str): Level to run the benchmark on.
+            force_download (bool, optional): Whether to
+                force download the data.
+        """
+        if force_download:
+            logger.info("Force downloading data.")
+            self.download()
+
+        if level == "level-1":
+            file_path = Path("api_bank/lv1-lv2-samples/level-1-given-desc")
+        elif level == 'level-2':
+            file_path = Path("api_bank/lv1-lv2-samples/level-2-toolsearcher")
+        jsonl_files = [
+            f for f in os.listdir(file_path) if f.endswith('.jsonl')
+        ]
+        for file in tqdm(jsonl_files, desc="Processing files"):
+            history = []
+            with open(file_path / file, 'r') as f:
+                for line in f:
+                    history.append(json.loads(line))
+            samples = APIBankSample.from_chat_history(history)
+            self._data[file.rsplit('.', 1)[0]] = samples
+
+        # Change imports to relative imports in the downloaded Python files
+        def process_files(folder_path, replacements):
+            r"""Replace absolute imports in downloaded files with
+            relative imports."""
+            for file in os.listdir(folder_path):
+                if file.endswith(".py"):
+                    file_path = os.path.join(folder_path, file)
+                    try:
+                        with open(file_path, "r", encoding="utf-8") as file:
+                            content = file.read()
+
+                        original_content = content
+
+                        for pattern, replacement in replacements:
+                            content = re.sub(pattern, replacement, content)
+
+                        if content != original_content:
+                            with open(
+                                file_path, "w", encoding="utf-8"
+                            ) as file:
+                                file.write(content)
+                            logger.info(f"Updated file: {file_path}")
+
+                    except Exception as e:
+                        logger.info(f"Error processing file {file_path}: {e}")
+
+        api_bank_folder = "api_bank"
+        apis_folder = os.path.join(api_bank_folder, "apis")
+
+        apis_replacements = [
+            (r"from apis.api", "from .api"),
+            (r"from apis import", "from .api import"),
+        ]
+
+        api_bank_replacements = [
+            (r"from apis", "from .apis"),
+            (r"from api_call_extraction", "from .api_call_extraction"),
+            (r"f'{basename}", r"f'api_bank.{basename}"),
+        ]
+
+        process_files(apis_folder, apis_replacements)
+        process_files(api_bank_folder, api_bank_replacements)
+
+    def run(  # type: ignore[override, return]
+        self,
+        agent: ChatAgent,
+        level: Literal["level-1", "level-2"],
+        api_test_enabled=True,
+        randomize: bool = False,
+        subset: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        r"""Run the benchmark.
+
+        Args:
+            agent (ChatAgent): The agent to run the benchmark.
+            level (Literal['level-1', 'level-2']):
+                The level to run the benchmark on.
+            api_test_enabled (bool): Whether to test
+                API calling (`True`) or response (`False`).
+                (default: :obj:`True`)
+            randomize (bool, optional): Whether to
+                randomize the data.
+            subset (Optional[int], optional):
+                The subset of data to run.
+                (default: :obj:`None`)
+
+        Returns:
+            Dict[str, Any]: The results of the benchmark.
+        """
+        logger.info(f"Running API-Bank benchmark on {level}.")
+        self.load(level)
+        datas = self._data
+
+        # Shuffle and subset data if necessary
+        if randomize:
+            randomized_items = list(datas.items())
+            random.shuffle(randomized_items)
+            datas = dict(randomized_items)
+        if subset:
+            datas = dict(list(datas.items())[:subset])
+
+        logger.info(f"Number of tasks: {len(datas)}")
+
+        # Initialize results storage
+        self._results = []
+
+        # The following code is adapted from the evaluator
+        # in the original repo:
+        tool_search_enabled = level == "level-2"
+        dialog_test_enabled = not api_test_enabled
+        total_api_calls, correct_api_calls, rougel_scores = 0, 0, []
+
+        with open(self.save_to, "w") as f:
+            for test in tqdm(datas, desc="Running"):
+                samples = self._data[test]
+                evaluator = Evaluator(samples)  # type: ignore[arg-type]
+
+                for sample_id in evaluator.get_all_sample_ids():
+                    # Process sample and generate response
+                    sample = evaluator.dataset[sample_id]
+
+                    if (
+                        sample.ground_truth['role'] == 'API'
+                        and api_test_enabled
+                    ):
+                        if tool_search_enabled:
+                            _, chat_history = evaluator.get_model_input(
+                                sample_id
+                            )
+                            api_descriptions = evaluator.get_api_description(
+                                'ToolSearcher'
+                            )
+                        else:
+                            api_descriptions, chat_history = (
+                                evaluator.get_model_input(sample_id)
+                            )
+                        messages = process_messages(
+                            chat_history, API_CALL_PROMPT + api_descriptions
+                        )
+                        model_output = agent_call(messages, agent)
+                        api_call = get_api_call(model_output)
+
+                        # Evaluate API call
+                        if api_call:
+                            try:
+                                correct, model_output_result = (
+                                    evaluator.evaluate(sample_id, api_call)
+                                )
+                            except AssertionError as e:
+                                if 'The API name is not correct.' not in str(
+                                    e
+                                ):
+                                    raise e
+                                logging.info('AssertionError: {}'.format(e))
+                                correct = False
+                        else:
+                            model_output_result = 'No API call found'
+                            correct = False
+                        if correct:
+                            correct_api_calls += 1
+                            logging.info(
+                                'Correct API call: {} Ground truth: {}'.format(
+                                    api_call, sample.ground_truth
+                                )
+                            )
+                        else:
+                            logging.info(
+                                'Incorrect model output: {} Result: {} \
+                                Ground truth: {} File: {} Sample ID: {} \
+                                Messages: {}'.format(
+                                    model_output.replace('\n', ' '),
+                                    model_output_result,
+                                    sample.ground_truth,
+                                    test,
+                                    sample_id,
+                                    messages[1:],
+                                )
+                            )
+                        total_api_calls += 1
+                        self._results.append(
+                            {
+                                'Role': 'API',
+                                'Model_output': model_output,
+                                'Model_output_result': model_output_result,
+                                'Ground_truth': sample.ground_truth,
+                                'Test': test,
+                                'Correct': correct,
+                            }
+                        )
+                        f.write(json.dumps(self._results[-1], indent=2) + "\n")
+
+                    elif (
+                        sample.ground_truth['role'] == 'AI'
+                        and dialog_test_enabled
+                    ):
+                        # Process sample and generate response
+                        api_descriptions, chat_history = (
+                            evaluator.get_model_input(sample_id)
+                        )
+
+                        messages = process_messages(
+                            chat_history, RESPONSE_PROMPT + api_descriptions
+                        )
+                        model_output = agent_call(messages, agent)
+
+                        # Evaluate model response
+                        if model_output:
+                            score = evaluator.evaluate(sample_id, model_output)
+                        else:
+                            score = 0
+                        rougel_scores.append(score)
+                        if score < 0.2:
+                            logging.info(
+                                'Low score: {} Score: {} Ground truth: {} \
+                                Test: {} Sample ID: {} \
+                                Messages: {}'.format(
+                                    model_output.replace('\n', ' '),
+                                    score,
+                                    sample.ground_truth,
+                                    test,
+                                    sample_id,
+                                    messages[1:],
+                                )
+                            )
+
+                        self._results.append(
+                            {
+                                'Role': 'AI',
+                                'Model_output': model_output,
+                                'Score': score,
+                                'Ground_truth': sample.ground_truth,
+                                'Test': test,
+                            }
+                        )
+                        f.write(json.dumps(self._results[-1], indent=2) + "\n")
+
+                f.flush()
+
+        if api_test_enabled:
+            return {
+                'total': total_api_calls,
+                'correct': correct_api_calls,
+                "accuracy": correct_api_calls / total_api_calls
+                if total_api_calls
+                else 0,
+            }
+        elif dialog_test_enabled:
+            return {'Dialog_score': np.mean(rougel_scores)}
+
+
+# The following code is migrated from the original repo:
+# https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/api-bank
+def agent_call(messages: List[Dict], agent: ChatAgent):
+    r"""Add messages to agent memory and get a response."""
+    for i, msg in enumerate(messages):
+        if msg['role'] == 'user':
+            message = BaseMessage.make_user_message(
+                role_name="CAMEL User", content=msg['content']
+            )
+        elif msg['role'] == 'assistant':
+            message = BaseMessage.make_assistant_message(
+                role_name="CAMEL Assistant", content=msg['content']
+            )
+        elif msg['role'] == 'system':
+            message = BaseMessage.make_assistant_message(
+                role_name="System", content=msg['content']
+            )
+        else:
+            raise ValueError(f"Unrecognized role: {msg['role']}")
+
+        if i == len(messages) - 1:
+            break
+        agent.record_message(message)
+
+    response = agent.step(message)
+    model_output = response.msgs[0].content
+    agent.reset()
+    return model_output
+
+
+def calculate_rouge_l_score(reference, hypothesis):
+    r"""Calculate the ROUGE-L score between hypothesis and reference."""
+    rouge = Rouge()
+    scores = rouge.get_scores(hypothesis, reference)
+    rouge_l_score = scores[0]['rouge-l']['f']
+    return rouge_l_score
+
+
+def get_api_call(model_output):
+    r"""Parse an API call from model output."""
+    api_call_pattern = r"\[(\w+)\((.*)\)\]"
+    api_call_pattern = re.compile(api_call_pattern)
+    match = api_call_pattern.search(model_output)
+    if match:
+        return match.group(0)
+    else:
+        return None
+
+
+class APIBankSample:
+    r"""APIBank sample used to load the datasets."""
+
+    def __init__(self, chat_history, apis, ground_truth):
+        self.chat_history = chat_history
+        self.apis = apis
+        self.ground_truth = ground_truth
+
+    def __repr__(self):
+        return 'Sample(chat_history={}, apis={}, ground_truth={})'.format(
+            self.chat_history, self.apis, self.ground_truth
+        )
+
+    @classmethod
+    def from_chat_history(cls, chat_history):
+        apis = set()
+        api_positions = []
+        for i, item in enumerate(chat_history):
+            if item['role'] == 'API':
+                apis.add(item['api_name'])
+                api_positions.append(i)
+
+        samples = []
+        for i in api_positions:
+            sample = cls(chat_history[:i], apis, chat_history[i])
+            samples.append(sample)
+            sample = cls(chat_history[: i + 1], apis, chat_history[i + 1])
+            samples.append(sample)
+
+        return samples
+
+
+class Evaluator:
+    r"""Evaluator for the APIBank benchmark."""
+
+    def __init__(self, samples: List[APIBankSample]):
+        # Placeholder for the import, as the import
+        # only works after the files have been downloaded
+        try:
+            from api_bank.tool_manager import (  # type: ignore[import-not-found]
+                ToolManager,
+            )
+        except Exception as e:
+            logger.info(f"{e}, Module will be imported after download.")
+        self.dataset = samples
+        self.sample_ids = list(range(len(self.dataset)))
+        os.chdir("api_bank")
+        self.tool_manager = ToolManager("apis")
+        os.chdir("..")
+
+    def get_all_sample_ids(self):
+        return self.sample_ids
+
+    def get_api_description(self, api_name):
+        return self.tool_manager.get_api_description(api_name)
+
+    def get_model_input(self, sample_id: int):
+        sample = self.dataset[sample_id]
+        apis = sample.apis
+        chat_history = sample.chat_history
+        api_descriptions = []
+        for api_name in apis:
+            api_descriptions.append(
+                self.tool_manager.get_api_description(api_name)
+            )
+        api_description = '\n'.join(api_descriptions)
+        return api_description, chat_history
+
+    def evaluate(self, sample_id, model_output):
+        try:
+            from api_bank.api_call_extraction import (  # type: ignore[import-not-found]
+                parse_api_call,
+            )
+        except Exception as e:
+            logger.info(f"{e}, Module will be imported after download.")
+        sample = self.dataset[sample_id]
+        ground_truth = sample.ground_truth
+        if ground_truth['role'] == 'API':
+            api_name, param_dict = parse_api_call(model_output)
+            if api_name != ground_truth['api_name']:
+                return False, 'API Name Mismatch: {} vs {}'.format(
+                    api_name, ground_truth['api_name']
+                )
+            try:
+                result = self.tool_manager.api_call(api_name, **param_dict)
+            except Exception as e:
+                return False, str(e)
+            api = self.tool_manager.init_tool(api_name)
+            try:
+                correct = api.check_api_call_correctness(
+                    result, ground_truth['result']
+                )
+            except KeyError:
+                correct = False
+                result = 'KeyError' + str(result)
+            return correct, result
+        elif ground_truth['role'] == 'AI':
+            score = calculate_rouge_l_score(ground_truth['text'], model_output)
+            return round(score, 4)
+
+
+API_CALL_PROMPT = '''
+Based on the given API description and the existing \
+conversation history 1..t, please generate the API request \
+that the AI should call in step t+1 and output it in the \
+format of [ApiName(key1='value1', key2='value2', ...)], \
+replace the ApiName with the actual API name, and \
+replace the key and value with the actual parameters. \
+Your output should start with a square bracket "[" \
+and end with a square bracket "]". Do not output any \
+other explanation or prompt or the result of the API call in your output.
+This year is 2023.
+Input:
+User: [User's utterance]
+AI: [AI's utterance]
+
+Expected output:
+[ApiName(key1='value1', key2='value2', ...)]
+
+API descriptions:
+'''
+
+RESPONSE_PROMPT = '''
+Based on the given API description and the existing \
+conversation history 1..t, please generate the next \
+dialog that the AI should respond with after the API call t.
+This year is 2023.
+Input:
+User: [User's utterance]
+AI: [AI's utterance]
+[ApiName(key1='value1', key2='value2', ...)]
+
+Expected output:
+AI: [AI's utterance]
+
+API descriptions:
+'''
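Taken together, an end-to-end run of the new benchmark might look like the sketch below. The ChatAgent setup is illustrative (any configured agent should work), and subset=5 merely keeps the example cheap; the remaining calls follow the signatures shown in the diff above:

from camel.agents import ChatAgent
from camel.benchmarks import APIBankBenchmark

agent = ChatAgent()  # illustrative; any configured ChatAgent
benchmark = APIBankBenchmark(save_to="apibank_results.json")
benchmark.download()  # fetch the api-bank subdirectory from GitHub
results = benchmark.run(agent, level="level-1", subset=5)
# api_test_enabled defaults to True, so the summary reports API-call accuracy
print(results)  # {'total': ..., 'correct': ..., 'accuracy': ...}

Because run() writes each evaluated sample to save_to as it goes, the results file can be inspected even if a run is cut short.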