camel-ai 0.1.5.1__py3-none-any.whl → 0.1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic.

Files changed (86)
  1. camel/agents/__init__.py +2 -0
  2. camel/agents/chat_agent.py +237 -52
  3. camel/agents/critic_agent.py +6 -9
  4. camel/agents/deductive_reasoner_agent.py +93 -40
  5. camel/agents/embodied_agent.py +6 -9
  6. camel/agents/knowledge_graph_agent.py +49 -27
  7. camel/agents/role_assignment_agent.py +14 -12
  8. camel/agents/search_agent.py +122 -0
  9. camel/agents/task_agent.py +26 -38
  10. camel/bots/__init__.py +20 -0
  11. camel/bots/discord_bot.py +103 -0
  12. camel/bots/telegram_bot.py +84 -0
  13. camel/configs/__init__.py +3 -0
  14. camel/configs/anthropic_config.py +1 -1
  15. camel/configs/litellm_config.py +113 -0
  16. camel/configs/openai_config.py +14 -0
  17. camel/embeddings/__init__.py +2 -0
  18. camel/embeddings/openai_embedding.py +2 -2
  19. camel/embeddings/sentence_transformers_embeddings.py +6 -5
  20. camel/embeddings/vlm_embedding.py +146 -0
  21. camel/functions/__init__.py +9 -0
  22. camel/functions/open_api_function.py +161 -33
  23. camel/functions/open_api_specs/biztoc/__init__.py +13 -0
  24. camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  25. camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  26. camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
  27. camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  28. camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  29. camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  30. camel/functions/open_api_specs/outschool/__init__.py +13 -0
  31. camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  32. camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  33. camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  34. camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  35. camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  36. camel/functions/open_api_specs/security_config.py +21 -0
  37. camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  38. camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  39. camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  40. camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  41. camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  42. camel/functions/openai_function.py +3 -1
  43. camel/functions/search_functions.py +104 -171
  44. camel/functions/slack_functions.py +16 -3
  45. camel/human.py +3 -1
  46. camel/loaders/base_io.py +3 -1
  47. camel/loaders/unstructured_io.py +16 -22
  48. camel/messages/base.py +135 -46
  49. camel/models/__init__.py +8 -0
  50. camel/models/anthropic_model.py +24 -16
  51. camel/models/base_model.py +6 -1
  52. camel/models/litellm_model.py +112 -0
  53. camel/models/model_factory.py +44 -16
  54. camel/models/nemotron_model.py +71 -0
  55. camel/models/ollama_model.py +121 -0
  56. camel/models/open_source_model.py +8 -2
  57. camel/models/openai_model.py +14 -5
  58. camel/models/stub_model.py +3 -1
  59. camel/models/zhipuai_model.py +125 -0
  60. camel/prompts/__init__.py +6 -0
  61. camel/prompts/base.py +2 -1
  62. camel/prompts/descripte_video_prompt.py +33 -0
  63. camel/prompts/generate_text_embedding_data.py +79 -0
  64. camel/prompts/task_prompt_template.py +13 -3
  65. camel/retrievers/auto_retriever.py +20 -11
  66. camel/retrievers/base.py +4 -2
  67. camel/retrievers/bm25_retriever.py +2 -1
  68. camel/retrievers/cohere_rerank_retriever.py +2 -1
  69. camel/retrievers/vector_retriever.py +10 -4
  70. camel/societies/babyagi_playing.py +2 -1
  71. camel/societies/role_playing.py +18 -20
  72. camel/storages/graph_storages/base.py +1 -0
  73. camel/storages/graph_storages/neo4j_graph.py +5 -3
  74. camel/storages/vectordb_storages/base.py +2 -1
  75. camel/storages/vectordb_storages/milvus.py +5 -2
  76. camel/toolkits/github_toolkit.py +120 -26
  77. camel/types/__init__.py +5 -2
  78. camel/types/enums.py +95 -4
  79. camel/utils/__init__.py +11 -2
  80. camel/utils/commons.py +78 -4
  81. camel/utils/constants.py +26 -0
  82. camel/utils/token_counting.py +62 -7
  83. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/METADATA +82 -53
  84. camel_ai-0.1.5.3.dist-info/RECORD +151 -0
  85. camel_ai-0.1.5.1.dist-info/RECORD +0 -119
  86. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/WHEEL +0 -0
camel/agents/__init__.py CHANGED
@@ -17,6 +17,7 @@ from .critic_agent import CriticAgent
 from .embodied_agent import EmbodiedAgent
 from .knowledge_graph_agent import KnowledgeGraphAgent
 from .role_assignment_agent import RoleAssignmentAgent
+from .search_agent import SearchAgent
 from .task_agent import (
     TaskCreationAgent,
     TaskPlannerAgent,
@@ -38,5 +39,6 @@ __all__ = [
     'HuggingFaceToolAgent',
     'EmbodiedAgent',
     'RoleAssignmentAgent',
+    'SearchAgent',
     'KnowledgeGraphAgent',
 ]
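With this change `SearchAgent` becomes part of the public `camel.agents` namespace. A minimal sketch of the new import path (the constructor arguments are not shown in this diff, so the no-argument call below is an assumption):

    from camel.agents import SearchAgent

    # Assumed default constructor; see camel/agents/search_agent.py for the
    # actual options introduced in this release.
    search_agent = SearchAgent()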
camel/agents/chat_agent.py CHANGED
@@ -32,6 +32,7 @@ from camel.responses import ChatAgentResponse
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
+    ModelPlatformType,
     ModelType,
     OpenAIBackendRole,
     RoleType,
@@ -41,7 +42,6 @@ from camel.utils import get_model_encoding
 if TYPE_CHECKING:
     from openai import Stream

-    from camel.configs import BaseConfig
     from camel.functions import OpenAIFunction
     from camel.terminators import ResponseTerminator

@@ -80,10 +80,9 @@ class ChatAgent(BaseAgent):

     Args:
         system_message (BaseMessage): The system message for the chat agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (BaseConfig, optional): Configuration options for the
-            LLM model. (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         api_key (str, optional): The API key for authenticating with the
             LLM service. Only OpenAI and Anthropic model supported (default:
             :obj:`None`)
@@ -109,8 +108,7 @@
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[BaseConfig] = None,
+        model: Optional[BaseModelBackend] = None,
         api_key: Optional[str] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
@@ -123,24 +121,30 @@
         self.system_message = system_message
         self.role_name: str = system_message.role_name
         self.role_type: RoleType = system_message.role_type
+        self._api_key = api_key
+        self.model_backend: BaseModelBackend = (
+            model
+            if model is not None
+            else ModelFactory.create(
+                model_platform=ModelPlatformType.OPENAI,
+                model_type=ModelType.GPT_3_5_TURBO,
+                model_config_dict=ChatGPTConfig().__dict__,
+                api_key=self._api_key,
+            )
+        )
         self.output_language: Optional[str] = output_language
         if self.output_language is not None:
             self.set_output_language(self.output_language)

-        self.model_type: ModelType = (
-            model_type if model_type is not None else ModelType.GPT_3_5_TURBO
-        )
+        self.model_type: ModelType = self.model_backend.model_type

         self.func_dict: Dict[str, Callable] = {}
         if tools is not None:
             for func in tools:
                 self.func_dict[func.get_function_name()] = func.func

-        self.model_config = model_config or ChatGPTConfig()
-        self._api_key = api_key
-        self.model_backend: BaseModelBackend = ModelFactory.create(
-            self.model_type, self.model_config.__dict__, self._api_key
-        )
+        self.model_config_dict = self.model_backend.model_config_dict
+
         self.model_token_limit = token_limit or self.model_backend.token_limit
         context_creator = ScoreBasedContextCreator(
             self.model_backend.token_counter,
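For downstream code, the constructor change above means a model backend built with `ModelFactory` replaces the old `model_type`/`model_config` pair. A minimal sketch of the new calling pattern, mirroring the default branch shown in `__init__` (the system-message values here are illustrative placeholders):

    from camel.agents import ChatAgent
    from camel.configs import ChatGPTConfig
    from camel.messages import BaseMessage
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType, RoleType

    # Build the backend explicitly instead of passing model_type/model_config.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_3_5_TURBO,
        model_config_dict=ChatGPTConfig().__dict__,
    )
    system_message = BaseMessage(
        role_name="Assistant",
        role_type=RoleType.ASSISTANT,
        meta_dict=None,
        content="You are a helpful assistant.",
    )
    # Previously: ChatAgent(system_message, model_type=..., model_config=...)
    agent = ChatAgent(system_message, model=model)

Omitting `model` keeps the previous behaviour, since the agent falls back to an OpenAI `GPT_3_5_TURBO` backend configured with `ChatGPTConfig`.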
@@ -306,7 +310,7 @@
         tool_calls: List[FunctionCallingRecord] = []
         while True:
             # Format messages and get the token number
-            openai_messages: Optional[List[OpenAIMessage]]
+            openai_messages: list[OpenAIMessage] | None

             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -314,18 +318,13 @@
                 return self.step_token_exceed(
                     e.args[1], tool_calls, "max_tokens_exceeded"
                 )
-
-            # Obtain the model's response
-            response = self.model_backend.run(openai_messages)
-
-            if isinstance(response, ChatCompletion):
-                output_messages, finish_reasons, usage_dict, response_id = (
-                    self.handle_batch_response(response)
-                )
-            else:
-                output_messages, finish_reasons, usage_dict, response_id = (
-                    self.handle_stream_response(response, num_tokens)
-                )
+            (
+                response,
+                output_messages,
+                finish_reasons,
+                usage_dict,
+                response_id,
+            ) = self._step_model_response(openai_messages, num_tokens)

             if (
                 self.is_tools_added()
@@ -350,38 +349,165 @@

             else:
                 # Function calling disabled or not a function calling
+                info = self._step_get_info(
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_calls,
+                    num_tokens,
+                )
+                break

-                # Loop over responses terminators, get list of termination
-                # tuples with whether the terminator terminates the agent
-                # and termination reason
-                termination = [
-                    terminator.is_terminated(output_messages)
-                    for terminator in self.response_terminators
-                ]
-                # Terminate the agent if any of the terminator terminates
-                self.terminated, termination_reason = next(
-                    (
-                        (terminated, termination_reason)
-                        for terminated, termination_reason in termination
-                        if terminated
-                    ),
-                    (False, None),
+        return ChatAgentResponse(output_messages, self.terminated, info)
+
+    async def step_async(
+        self,
+        input_message: BaseMessage,
+    ) -> ChatAgentResponse:
+        r"""Performs a single step in the chat session by generating a response
+        to the input message. This agent step can call async function calls.
+
+        Args:
+            input_message (BaseMessage): The input message to the agent.
+                Its `role` field that specifies the role at backend may be either
+                `user` or `assistant` but it will be set to `user` anyway since
+                for the self agent any incoming message is external.
+
+        Returns:
+            ChatAgentResponse: A struct containing the output messages,
+                a boolean indicating whether the chat session has terminated,
+                and information about the chat session.
+        """
+        self.update_memory(input_message, OpenAIBackendRole.USER)
+
+        output_messages: List[BaseMessage]
+        info: Dict[str, Any]
+        tool_calls: List[FunctionCallingRecord] = []
+        while True:
+            # Format messages and get the token number
+            openai_messages: list[OpenAIMessage] | None
+
+            try:
+                openai_messages, num_tokens = self.memory.get_context()
+            except RuntimeError as e:
+                return self.step_token_exceed(
+                    e.args[1], tool_calls, "max_tokens_exceeded"
                 )
-            # For now only retain the first termination reason
-            if self.terminated and termination_reason is not None:
-                finish_reasons = [termination_reason] * len(finish_reasons)
+            (
+                response,
+                output_messages,
+                finish_reasons,
+                usage_dict,
+                response_id,
+            ) = self._step_model_response(openai_messages, num_tokens)

-            info = self.get_info(
-                response_id,
-                usage_dict,
+            if (
+                self.is_tools_added()
+                and isinstance(response, ChatCompletion)
+                and response.choices[0].message.tool_calls is not None
+            ):
+                # Tools added for function calling and not in stream mode
+
+                # Do function calling
+                (
+                    func_assistant_msg,
+                    func_result_msg,
+                    func_record,
+                ) = await self.step_tool_call_async(response)
+
+                # Update the messages
+                self.update_memory(
+                    func_assistant_msg, OpenAIBackendRole.ASSISTANT
+                )
+                self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
+
+                # Record the function calling
+                tool_calls.append(func_record)
+
+            else:
+                # Function calling disabled or not a function calling
+                info = self._step_get_info(
+                    output_messages,
                     finish_reasons,
-                num_tokens,
+                    usage_dict,
+                    response_id,
                     tool_calls,
+                    num_tokens,
                 )
                 break

         return ChatAgentResponse(output_messages, self.terminated, info)

+    def _step_model_response(
+        self,
+        openai_messages: list[OpenAIMessage],
+        num_tokens: int,
+    ) -> tuple[
+        ChatCompletion | Stream[ChatCompletionChunk],
+        list[BaseMessage],
+        list[str],
+        dict[str, int],
+        str,
+    ]:
+        r"""Internal function for agent step model response."""
+        # Obtain the model's response
+        response = self.model_backend.run(openai_messages)
+
+        if isinstance(response, ChatCompletion):
+            output_messages, finish_reasons, usage_dict, response_id = (
+                self.handle_batch_response(response)
+            )
+        else:
+            output_messages, finish_reasons, usage_dict, response_id = (
+                self.handle_stream_response(response, num_tokens)
+            )
+        return (
+            response,
+            output_messages,
+            finish_reasons,
+            usage_dict,
+            response_id,
+        )
+
+    def _step_get_info(
+        self,
+        output_messages: List[BaseMessage],
+        finish_reasons: List[str],
+        usage_dict: Dict[str, int],
+        response_id: str,
+        tool_calls: List[FunctionCallingRecord],
+        num_tokens: int,
+    ) -> Dict[str, Any]:
+        # Loop over responses terminators, get list of termination
+        # tuples with whether the terminator terminates the agent
+        # and termination reason
+        termination = [
+            terminator.is_terminated(output_messages)
+            for terminator in self.response_terminators
+        ]
+        # Terminate the agent if any of the terminator terminates
+        self.terminated, termination_reason = next(
+            (
+                (terminated, termination_reason)
+                for terminated, termination_reason in termination
+                if terminated
+            ),
+            (False, None),
+        )
+        # For now only retain the first termination reason
+        if self.terminated and termination_reason is not None:
+            finish_reasons = [termination_reason] * len(finish_reasons)
+
+        info = self.get_info(
+            response_id,
+            usage_dict,
+            finish_reasons,
+            num_tokens,
+            tool_calls,
+        )
+        return info
+
     def handle_batch_response(
         self, response: ChatCompletion
     ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
@@ -516,12 +642,12 @@
         """
         choice = response.choices[0]
         if choice.message.tool_calls is None:
-            raise RuntimeError("Tool calls is None")
+            raise RuntimeError("Tool call is None")
         func_name = choice.message.tool_calls[0].function.name
         func = self.func_dict[func_name]

         args_str: str = choice.message.tool_calls[0].function.arguments
-        args = json.loads(args_str.replace("'", "\""))
+        args = json.loads(args_str)

         # Pass the extracted arguments to the indicated function
         try:
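The second change in this hunk is behavioural: tool-call arguments returned by the model are already valid JSON, and the old single-to-double quote substitution could corrupt values that legitimately contain apostrophes. A small illustration of why the `replace` call was dropped:

    import json

    args_str = '{"query": "Murphy\'s law"}'  # valid JSON returned for a tool call
    json.loads(args_str)                      # {'query': "Murphy's law"}
    # The old pre-processing first turned the payload into invalid JSON:
    # args_str.replace("'", '"') -> '{"query": "Murphy"s law"}', which
    # json.loads rejects with a JSONDecodeError.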
@@ -553,6 +679,65 @@
         func_record = FunctionCallingRecord(func_name, args, result)
         return assist_msg, func_msg, func_record

+    async def step_tool_call_async(
+        self,
+        response: ChatCompletion,
+    ) -> Tuple[
+        FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
+    ]:
+        r"""Execute the async function with arguments following the model's
+        response.
+
+        Args:
+            response (Dict[str, Any]): The response obtained by calling the
+                model.
+
+        Returns:
+            tuple: A tuple consisting of two obj:`FunctionCallingMessage`,
+                one about the arguments and the other about the execution
+                result, and a struct for logging information about this
+                function call.
+        """
+        # Note that when function calling is enabled, `n` is set to 1.
+        choice = response.choices[0]
+        if choice.message.tool_calls is None:
+            raise RuntimeError("Tool call is None")
+        func_name = choice.message.tool_calls[0].function.name
+        func = self.func_dict[func_name]
+
+        args_str: str = choice.message.tool_calls[0].function.arguments
+        args = json.loads(args_str)
+
+        # Pass the extracted arguments to the indicated function
+        try:
+            result = await func(**args)
+        except Exception:
+            raise ValueError(
+                f"Execution of function {func.__name__} failed with "
+                f"arguments being {args}."
+            )
+
+        assist_msg = FunctionCallingMessage(
+            role_name=self.role_name,
+            role_type=self.role_type,
+            meta_dict=None,
+            content="",
+            func_name=func_name,
+            args=args,
+        )
+        func_msg = FunctionCallingMessage(
+            role_name=self.role_name,
+            role_type=self.role_type,
+            meta_dict=None,
+            content="",
+            func_name=func_name,
+            result=result,
+        )
+
+        # Record information about this function call
+        func_record = FunctionCallingRecord(func_name, args, result)
+        return assist_msg, func_msg, func_record
+
     def get_usage_dict(
         self, output_messages: List[BaseMessage], prompt_tokens: int
     ) -> Dict[str, int]:
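Together with `step_async` above, this method lets tools be `async def` callables, since the result is obtained with `await func(**args)` rather than a blocking call. A rough usage sketch that reuses the `system_message` and `model` objects from the earlier ChatAgent example; it assumes `OpenAIFunction` can wrap a plain async callable, and `fetch_weather` is a hypothetical tool, not part of the package:

    import asyncio

    from camel.functions import OpenAIFunction
    from camel.messages import BaseMessage
    from camel.types import RoleType

    async def fetch_weather(city: str) -> str:
        r"""Return a short weather summary for the given city."""
        await asyncio.sleep(0.1)  # stand-in for a real non-blocking API call
        return f"Sunny in {city}"

    agent = ChatAgent(
        system_message,
        model=model,
        tools=[OpenAIFunction(fetch_weather)],  # wrapping API assumed, not shown in this diff
    )

    async def main() -> None:
        user_msg = BaseMessage(
            role_name="User",
            role_type=RoleType.USER,
            meta_dict=None,
            content="What's the weather in Paris?",
        )
        # step_async dispatches tool calls through step_tool_call_async
        response = await agent.step_async(user_msg)
        print(response.msgs[0].content)

    asyncio.run(main())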
camel/agents/critic_agent.py CHANGED
@@ -20,8 +20,8 @@ from colorama import Fore
 from camel.agents.chat_agent import ChatAgent
 from camel.memories import AgentMemory
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
-from camel.types import ModelType
 from camel.utils import get_first_int, print_text_animated


@@ -31,10 +31,9 @@ class CriticAgent(ChatAgent):
     Args:
         system_message (BaseMessage): The system message for the critic
             agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): Configuration options for the LLM model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`6`)
@@ -48,8 +47,7 @@ class CriticAgent(ChatAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: int = 6,
         retry_attempts: int = 2,
@@ -58,8 +56,7 @@ class CriticAgent(ChatAgent):
     ) -> None:
         super().__init__(
             system_message,
-            model_type=model_type,
-            model_config=model_config,
+            model=model,
             memory=memory,
             message_window_size=message_window_size,
         )
camel/agents/deductive_reasoner_agent.py CHANGED
@@ -15,10 +15,10 @@ import re
 from typing import Dict, List, Optional, Union

 from camel.agents.chat_agent import ChatAgent
-from camel.configs import BaseConfig
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType


 class DeductiveReasonerAgent(ChatAgent):
@@ -33,16 +33,14 @@ class DeductiveReasonerAgent(ChatAgent):
     - L represents the path or process from A to B.

     Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj: `None`)
-        model_config (BaseConfig, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
     """

     def __init__(
         self,
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[BaseConfig] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Insight Agent",
@@ -50,7 +48,7 @@ class DeductiveReasonerAgent(ChatAgent):
             meta_dict=None,
             content="You assign roles based on tasks.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)

     def deduce_conditions_and_quality(
         self,
@@ -90,52 +88,106 @@
         """
         self.reset()

-        deduce_prompt = """You are a deductive reasoner. You are tasked to complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the STARTING STATE A and the TARGET STATE B. You are given the CONTEXT CONTENT to help you complete the TASK.
-Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY fill in the BLANKs, and DO NOT alter or modify any other part of the template
+        deduce_prompt = """You are a deductive reasoner. You are tasked to
+complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the
+STARTING STATE A and the TARGET STATE B. You are given the CONTEXT
+CONTENT to help you complete the TASK.
+Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY
+fill in the BLANKs, and DO NOT alter or modify any other part of the template

 ===== MODELING OF DEDUCTIVE REASONING =====
-You are tasked with understanding a mathematical model based on the components ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
+You are tasked with understanding a mathematical model based on the components
+${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
 - $A$ represents the known starting state.
 - $B$ represents the known target state.
 - $C$ represents the conditions required to transition from $A$ to $B$.
-- $Q$ represents the quality or effectiveness of the transition from $A$ to $B$.
+- $Q$ represents the quality or effectiveness of the transition from $A$ to
+$B$.
 - $L$ represents the path or process from $A$ to $B$.

 ===== THOUGHT OF DEDUCTIVE REASONING =====
 1. Define the Parameters of A and B:
-    - Characterization: Before delving into transitions, thoroughly understand the nature and boundaries of both $A$ and $B$. This includes the type, properties, constraints, and possible interactions between the two.
-    - Contrast and Compare: Highlight the similarities and differences between $A$ and $B$. This comparative analysis will give an insight into what needs changing and what remains constant.
+    - Characterization: Before delving into transitions, thoroughly understand
+    the nature and boundaries of both $A$ and $B$. This includes the type,
+    properties, constraints, and possible interactions between the two.
+    - Contrast and Compare: Highlight the similarities and differences between
+    $A$ and $B$. This comparative analysis will give an insight into what
+    needs changing and what remains constant.
 2. Historical & Empirical Analysis:
-    - Previous Transitions according to the Knowledge Base of GPT: (if applicable) Extract conditions and patterns from the historical instances where a similar transition from a state comparable to $A$ moved towards $B$.
-    - Scientific Principles: (if applicable) Consider the underlying scientific principles governing or related to the states and their transition. For example, if $A$ and $B$ are physical states, laws of physics might apply.
+    - Previous Transitions according to the Knowledge Base of GPT: (if
+    applicable) Extract conditions and patterns from the historical instances
+    where a similar transition from a state comparable to $A$ moved towards
+    $B$.
+    - Scientific Principles: (if applicable) Consider the underlying
+    scientific principles governing or related to the states and their
+    transition. For example, if $A$ and $B$ are physical states, laws of
+    physics might apply.
 3. Logical Deduction of Conditions ($C$):
-    - Direct Path Analysis: What are the immediate and direct conditions required to move from $A$ to $B$?
-    - Intermediate States: Are there states between $A$ and $B$ that must be transversed or can be used to make the transition smoother or more efficient? If yes, what is the content?
-    - Constraints & Limitations: Identify potential barriers or restrictions in moving from $A$ to $B$. These can be external (e.g., environmental factors) or internal (properties of $A$ or $B$).
-    - Resource and Information Analysis: What resources and information are required for the transition? This could be time, entity, factor, code language, software platform, unknowns, etc.
-    - External Influences: Consider socio-economic, political, or environmental factors (if applicable) that could influence the transition conditions.
-    - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s, no matter how unconventional they might seem. Utilize analogies, metaphors, or brainstorming techniques to envision possible conditions or paths from $A$ to $B$.
-    - The conditions $C$ should be multiple but in one sentence. And each condition should be concerned with one aspect/entity.
+    - Direct Path Analysis: What are the immediate and direct conditions
+    required to move from $A$ to $B$?
+    - Intermediate States: Are there states between $A$ and $B$ that must be
+    transversed or can be used to make the transition smoother or more
+    efficient? If yes, what is the content?
+    - Constraints & Limitations: Identify potential barriers or restrictions
+    in moving from $A$ to $B$. These can be external (e.g., environmental
+    factors) or internal (properties of $A$ or $B$).
+    - Resource and Information Analysis: What resources and information are
+    required for the transition? This could be time, entity, factor, code
+    language, software platform, unknowns, etc.
+    - External Influences: Consider socio-economic, political, or
+    environmental factors (if applicable) that could influence the transition
+    conditions.
+    - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s,
+    no matter how unconventional they might seem. Utilize analogies,
+    metaphors, or brainstorming techniques to envision possible conditions or
+    paths from $A$ to $B$.
+    - The conditions $C$ should be multiple but in one sentence. And each
+    condition should be concerned with one aspect/entity.
 4. Entity/Label Recognition of Conditions ($C$):
-    - Identify and categorize entities of Conditions ($C$) such as the names, locations, dates, specific technical terms or contextual parameters that might be associated with events, innovations post-2022.
-    - The output of the entities/labels will be used as tags or labels for semantic similarity searches. The entities/labels may be the words, or phrases, each of them should contain valuable, high information entropy information, and should be independent.
-    - Ensure that the identified entities are formatted in a manner suitable for database indexing and retrieval. Organize the entities into categories, and combine the category with its instance into a continuous phrase, without using colons or other separators.
-    - Format these entities for database indexing: output the category rather than its instance/content into a continuous phrase. For example, instead of "Jan. 02", identify it as "Event time".
+    - Identify and categorize entities of Conditions ($C$) such as the names,
+    locations, dates, specific technical terms or contextual parameters that
+    might be associated with events, innovations post-2022.
+    - The output of the entities/labels will be used as tags or labels for
+    semantic similarity searches. The entities/labels may be the words, or
+    phrases, each of them should contain valuable, high information entropy
+    information, and should be independent.
+    - Ensure that the identified entities are formatted in a manner suitable
+    for database indexing and retrieval. Organize the entities into
+    categories, and combine the category with its instance into a continuous
+    phrase, without using colons or other separators.
+    - Format these entities for database indexing: output the category rather
+    than its instance/content into a continuous phrase. For example, instead
+    of "Jan. 02", identify it as "Event time".
 5. Quality Assessment ($Q$):
-    - Efficiency: How efficient is the transition from $A$ to $B$, which measures the resources used versus the desired outcome?
-    - Effectiveness: Did the transition achieve the desired outcome or was the target state achieved as intended?
-    - Safety & Risks: Assess any risks associated with the transition and the measures to mitigate them.
-    - Feedback Mechanisms: Incorporate feedback loops to continuously monitor and adjust the quality of transition, making it more adaptive.
+    - Efficiency: How efficient is the transition from $A$ to $B$, which
+    measures the resources used versus the desired outcome?
+    - Effectiveness: Did the transition achieve the desired outcome or was the
+    target state achieved as intended?
+    - Safety & Risks: Assess any risks associated with the transition and the
+    measures to mitigate them.
+    - Feedback Mechanisms: Incorporate feedback loops to continuously monitor
+    and adjust the quality of transition, making it more adaptive.
 6. Iterative Evaluation:
-    - Test & Refine: Based on the initially deduced conditions and assessed quality, iterate the process to refine and optimize the transition. This might involve tweaking conditions, employing different paths, or changing resources.
-    - Feedback Integration: Use feedback to make improvements and increase the quality of the transition.
-7. Real-world scenarios often present challenges that may not be captured by models and frameworks. While using the model, maintain an adaptive mindset:
-    - Scenario Exploration: Continuously imagine various possible scenarios, both positive and negative, to prepare for unexpected events.
-    - Flexibility: Be prepared to modify conditions ($C$) or alter the path/process ($L$) if unforeseen challenges arise.
-    - Feedback Integration: Rapidly integrate feedback from actual implementations to adjust the model's application, ensuring relevancy and effectiveness.
+    - Test & Refine: Based on the initially deduced conditions and assessed
+    quality, iterate the process to refine and optimize the transition. This
+    might involve tweaking conditions, employing different paths, or changing
+    resources.
+    - Feedback Integration: Use feedback to make improvements and increase the
+    quality of the transition.
+7. Real-world scenarios often present challenges that may not be captured by
+models and frameworks. While using the model, maintain an adaptive mindset:
+    - Scenario Exploration: Continuously imagine various possible scenarios,
+    both positive and negative, to prepare for unexpected events.
+    - Flexibility: Be prepared to modify conditions ($C$) or alter the path/
+    process ($L$) if unforeseen challenges arise.
+    - Feedback Integration: Rapidly integrate feedback from actual
+    implementations to adjust the model's application, ensuring relevancy and
+    effectiveness.

 ===== TASK =====
-Given the starting state $A$ and the target state $B$, assuming that a path $L$ always exists between $A$ and $B$, how can one deduce or identify the necessary conditions $C$ and the quality $Q$ of the transition?
+Given the starting state $A$ and the target state $B$, assuming that a path
+$L$ always exists between $A$ and $B$, how can one deduce or identify the
+necessary conditions $C$ and the quality $Q$ of the transition?

 ===== STARTING STATE $A$ =====
 {starting_state}
@@ -150,7 +202,8 @@ Given the starting state $A$ and the target state $B$, assuming that a path $L$
 - Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
     condition <NUM>:
         <BLANK>.
-- Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include square brackets)
+- Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include
+square brackets)
 - Quality Assessment ($Q$) (do not use symbols):
     <BLANK>.
 - Iterative Evaluation:\n<BLANK>/None"""
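Because the refactor only touches the constructor and reflows the prompt string, existing callers of `deduce_conditions_and_quality` keep working; they now pass a model backend (or nothing) instead of `model_type`/`model_config`. A hedged sketch, with the keyword names `starting_state` and `target_state` inferred from the prompt placeholders rather than read from the (truncated) signature:

    from camel.agents import DeductiveReasonerAgent

    # Defaults to the OpenAI GPT_3_5_TURBO backend; a backend built with
    # ModelFactory can be passed via `model=` instead.
    reasoner = DeductiveReasonerAgent()

    conditions_and_quality = reasoner.deduce_conditions_and_quality(
        starting_state="The database schema is undocumented.",
        target_state="Every table has an up-to-date data dictionary entry.",
    )
    print(conditions_and_quality)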