camel-ai 0.2.20a0__py3-none-any.whl → 0.2.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +2 -3
- camel/agents/knowledge_graph_agent.py +1 -5
- camel/benchmarks/apibench.py +1 -5
- camel/benchmarks/nexus.py +1 -5
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/telegram_bot.py +1 -5
- camel/configs/__init__.py +3 -0
- camel/configs/aiml_config.py +80 -0
- camel/datagen/__init__.py +3 -1
- camel/datagen/self_improving_cot.py +821 -0
- camel/interpreters/subprocess_interpreter.py +72 -6
- camel/models/__init__.py +2 -0
- camel/models/aiml_model.py +147 -0
- camel/models/model_factory.py +3 -0
- camel/models/siliconflow_model.py +1 -1
- camel/societies/workforce/role_playing_worker.py +2 -4
- camel/societies/workforce/single_agent_worker.py +1 -6
- camel/societies/workforce/workforce.py +3 -9
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/reddit_toolkit.py +8 -38
- camel/toolkits/sympy_toolkit.py +778 -0
- camel/toolkits/whatsapp_toolkit.py +11 -32
- camel/types/enums.py +29 -1
- camel/utils/__init__.py +7 -2
- camel/utils/commons.py +198 -21
- camel/utils/deduplication.py +232 -0
- camel/utils/token_counting.py +0 -38
- {camel_ai-0.2.20a0.dist-info → camel_ai-0.2.21.dist-info}/METADATA +10 -12
- {camel_ai-0.2.20a0.dist-info → camel_ai-0.2.21.dist-info}/RECORD +33 -28
- /camel/datagen/{cotdatagen.py → cot_datagen.py} +0 -0
- {camel_ai-0.2.20a0.dist-info → camel_ai-0.2.21.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.20a0.dist-info → camel_ai-0.2.21.dist-info}/WHEEL +0 -0
camel/toolkits/whatsapp_toolkit.py
CHANGED

@@ -19,7 +19,7 @@ import requests
 
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
-from camel.utils import retry_request
+from camel.utils import retry_on_error
 
 
 class WhatsAppToolkit(BaseToolkit):

@@ -36,18 +36,8 @@ class WhatsAppToolkit(BaseToolkit):
         version (str): API version.
     """
 
-    def __init__(self, retries: int = 3, delay: int = 1):
-        r"""Initializes the WhatsAppToolkit with the specified number of
-        retries and delay.
-
-        Args:
-            retries (int): Number of times to retry the request in case of
-                failure. (default: :obj:`3`)
-            delay (int): Time in seconds to wait between retries.
-                (default: :obj:`1`)
-        """
-        self.retries = retries
-        self.delay = delay
+    def __init__(self):
+        r"""Initializes the WhatsAppToolkit."""
         self.base_url = "https://graph.facebook.com"
         self.version = "v17.0"
 
@@ -61,6 +51,7 @@ class WhatsAppToolkit(BaseToolkit):
                 "WHATSAPP_PHONE_NUMBER_ID environment variables."
             )
 
+    @retry_on_error()
     def send_message(
         self, to: str, message: str
     ) -> Union[Dict[str, Any], str]:

@@ -88,19 +79,15 @@ class WhatsAppToolkit(BaseToolkit):
         }
 
         try:
-            response = retry_request(
-                requests.post,
-                retries=self.retries,
-                delay=self.delay,
-                url=url,
-                headers=headers,
-                json=data,
-            )
+            response = requests.post(url=url, headers=headers, json=data)
             response.raise_for_status()
             return response.json()
+        except requests.exceptions.RequestException as e:
+            raise e
         except Exception as e:
             return f"Failed to send message: {e!s}"
 
+    @retry_on_error()
     def get_message_templates(self) -> Union[List[Dict[str, Any]], str]:
         r"""Retrieves all message templates for the WhatsApp Business account.
 
@@ -116,18 +103,13 @@ class WhatsAppToolkit(BaseToolkit):
         headers = {"Authorization": f"Bearer {self.access_token}"}
 
         try:
-            response = retry_request(
-                requests.get,
-                retries=self.retries,
-                delay=self.delay,
-                url=url,
-                headers=headers,
-            )
+            response = requests.get(url=url, headers=headers)
             response.raise_for_status()
             return response.json().get("data", [])
         except Exception as e:
             return f"Failed to retrieve message templates: {e!s}"
 
+    @retry_on_error()
     def get_business_profile(self) -> Union[Dict[str, Any], str]:
         r"""Retrieves the WhatsApp Business profile information.
 
@@ -149,10 +131,7 @@ class WhatsAppToolkit(BaseToolkit):
         }
 
         try:
-            response = retry_request(
-                requests.get,
-                retries=self.retries,
-                delay=self.delay,
+            response = requests.get(
                 url=url,
                 headers=headers,
                 params=params,
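
Net effect for the toolkit: the per-instance retries/delay knobs are gone, and transient-failure handling now comes from the shared retry_on_error decorator applied to each public method. A minimal usage sketch (assuming WhatsAppToolkit remains exported from camel.toolkits as in prior releases, that WHATSAPP_ACCESS_TOKEN and WHATSAPP_PHONE_NUMBER_ID are set, and with a placeholder recipient number):

from camel.toolkits import WhatsAppToolkit

toolkit = WhatsAppToolkit()  # the retries/delay arguments no longer exist

# Decorated methods are retried up to 3 times with exponential backoff
# before an error surfaces.
result = toolkit.send_message(to="15551234567", message="Hello from CAMEL!")
print(result)
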
camel/types/enums.py
CHANGED

@@ -163,6 +163,7 @@ class ModelType(UnifiedModelType, Enum):
     QWEN_2_5_32B = "qwen2.5-32b-instruct"
     QWEN_2_5_14B = "qwen2.5-14b-instruct"
     QWEN_QWQ_32B = "qwq-32b-preview"
+    QWEN_QVQ_72B = "qvq-72b-preview"
 
     # Yi models (01-ai)
     YI_LIGHTNING = "yi-lightning"

@@ -203,6 +204,10 @@ class ModelType(UnifiedModelType, Enum):
     SILICONFLOW_THUDM_GLM_4_9B_CHAT = "THUDM/glm-4-9b-chat"
     SILICONFLOW_PRO_THUDM_GLM_4_9B_CHAT = "Pro/THUDM/glm-4-9b-chat"
 
+    # AIML models support tool calling
+    AIML_MIXTRAL_8X7B = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    AIML_MISTRAL_7B_INSTRUCT = "mistralai/Mistral-7B-Instruct-v0.1"
+
     def __str__(self):
         return self.value
 
@@ -217,7 +222,11 @@ class ModelType(UnifiedModelType, Enum):
 
     @property
     def support_native_structured_output(self) -> bool:
-        return self.is_openai
+        return any(
+            [
+                self.is_openai,
+            ]
+        )
 
     @property
     def support_native_tool_calling(self) -> bool:

@@ -236,6 +245,8 @@ class ModelType(UnifiedModelType, Enum):
                 self.is_sglang,
                 self.is_moonshot,
                 self.is_siliconflow,
+                self.is_zhipuai,
+                self.is_aiml,
             ]
         )
 

@@ -453,6 +464,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.QWEN_2_5_32B,
             ModelType.QWEN_2_5_14B,
             ModelType.QWEN_QWQ_32B,
+            ModelType.QWEN_QVQ_72B,
         }
 
     @property

@@ -510,6 +522,13 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.SILICONFLOW_PRO_THUDM_GLM_4_9B_CHAT,
         }
 
+    @property
+    def is_aiml(self) -> bool:
+        return self in {
+            ModelType.AIML_MIXTRAL_8X7B,
+            ModelType.AIML_MISTRAL_7B_INSTRUCT,
+        }
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.

@@ -575,6 +594,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_MISTRAL_LARGE,
             ModelType.NVIDIA_MIXTRAL_8X7B,
             ModelType.QWEN_QWQ_32B,
+            ModelType.QWEN_QVQ_72B,
             ModelType.INTERNLM3_8B_INSTRUCT,
             ModelType.INTERNLM3_LATEST,
             ModelType.INTERNLM2_5_LATEST,

@@ -582,6 +602,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.TOGETHER_MIXTRAL_8_7B,
             ModelType.SGLANG_MISTRAL_7B,
             ModelType.MOONSHOT_V1_32K,
+            ModelType.AIML_MIXTRAL_8X7B,
+            ModelType.AIML_MISTRAL_7B_INSTRUCT,
         }:
             return 32_768
         elif self in {

@@ -860,6 +882,7 @@ class ModelPlatformType(Enum):
     INTERNLM = "internlm"
     MOONSHOT = "moonshot"
     SILICONFLOW = "siliconflow"
+    AIML = "aiml"
 
     @property
     def is_openai(self) -> bool:

@@ -977,6 +1000,11 @@
         r"""Returns whether this platform is SiliconFlow."""
         return self is ModelPlatformType.SILICONFLOW
 
+    @property
+    def is_aiml(self) -> bool:
+        r"""Returns whether this platform is AIML."""
+        return self is ModelPlatformType.AIML
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
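
With AIML registered as both a platform and a set of model types, the usual factory path should work for it. A minimal sketch, assuming CAMEL's standard ModelFactory.create API and the new AIMLConfig from camel/configs/aiml_config.py (an AIML_API_KEY environment variable is an assumption by analogy with the other platforms; model_factory.py and aiml_model.py also change in this release, but their bodies are not shown in this diff):

from camel.configs import AIMLConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Hypothetical wiring based on the enum additions above.
model = ModelFactory.create(
    model_platform=ModelPlatformType.AIML,
    model_type=ModelType.AIML_MIXTRAL_8X7B,
    model_config_dict=AIMLConfig(temperature=0.2).as_dict(),
)
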
camel/utils/__init__.py
CHANGED

@@ -14,6 +14,7 @@
 
 from .commons import (
     AgentOpsMeta,
+    BatchProcessor,
     agentops_decorator,
     api_keys_required,
     check_server_running,

@@ -33,16 +34,17 @@ from .commons import (
     is_docker_running,
     json_to_function_code,
     print_text_animated,
+    retry_on_error,
     text_extract_from_web,
     to_pascal,
     track_agent,
 )
 from .constants import Constants
+from .deduplication import DeduplicationResult, deduplicate_internally
 from .response_format import get_pydantic_model
 from .token_counting import (
     AnthropicTokenCounter,
     BaseTokenCounter,
-    GeminiTokenCounter,
     LiteLLMTokenCounter,
     MistralTokenCounter,
     OpenAITokenCounter,

@@ -69,7 +71,6 @@ __all__ = [
     "dependencies_required",
     "api_keys_required",
     "is_docker_running",
-    "GeminiTokenCounter",
     "MistralTokenCounter",
     "get_pydantic_major_version",
     "get_pydantic_object_schema",

@@ -82,4 +83,8 @@ __all__ = [
     "get_pydantic_model",
     "download_github_subdirectory",
     "generate_prompt_for_structured_output",
+    "deduplicate_internally",
+    "DeduplicationResult",
+    "retry_on_error",
+    "BatchProcessor",
 ]
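
The deduplication helpers exported above live in the new camel/utils/deduplication.py, whose body is not part of this diff. A hedged sketch of the call shape implied by the exported names (the list-of-texts argument is an assumption):

from camel.utils import DeduplicationResult, deduplicate_internally

# Assumed signature; consult camel/utils/deduplication.py for the real one.
result: DeduplicationResult = deduplicate_internally(
    ["What is CAMEL?", "what is camel?", "Explain RAG."]
)
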
camel/utils/commons.py
CHANGED

@@ -11,7 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import functools
 import importlib
+import logging
 import os
 import platform
 import re

@@ -47,6 +49,8 @@ from .constants import Constants
 
 F = TypeVar('F', bound=Callable[..., Any])
 
+logger = logging.getLogger(__name__)
+
 
 def print_text_animated(text, delay: float = 0.02, end: str = ""):
     r"""Prints the given text with an animated effect.
@@ -620,33 +624,206 @@ def handle_http_error(response: requests.Response) -> str:
     return "HTTP Error"
 
 
-def retry_request(
-    func: Callable, retries: int = 3, delay: int = 1, *args: Any, **kwargs: Any
-) -> Any:
-    r"""Retries a request function in case of failure.
-
-    Args:
-        func (Callable): The function to retry.
-        retries (int): Number of retries. (default: :obj:`3`)
-        delay (int): Delay between retries in seconds. (default: :obj:`1`)
-        *args: Arguments to pass to the function.
-        **kwargs: Keyword arguments to pass to the function.
-
-    Returns:
-        Any: The result of the function call.
-
-    Raises:
-        Exception: If all retries fail.
-    """
-    for attempt in range(retries):
-        try:
-            return func(*args, **kwargs)
-        except Exception as e:
-            if attempt < retries - 1:
-                print(f"Attempt {attempt + 1} failed: {e}. Retrying...")
-                time.sleep(delay)
-            else:
-                raise e
+def retry_on_error(
+    max_retries: int = 3, initial_delay: float = 1.0
+) -> Callable:
+    r"""Decorator to retry function calls on exception with exponential
+    backoff.
+
+    Args:
+        max_retries (int): Maximum number of retry attempts
+        initial_delay (float): Initial delay between retries in seconds
+
+    Returns:
+        Callable: Decorated function with retry logic
+    """
+
+    def decorator(func: Callable) -> Callable:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            delay = initial_delay
+            last_exception = None
+
+            for attempt in range(max_retries + 1):
+                try:
+                    return func(*args, **kwargs)
+                except Exception as e:
+                    last_exception = e
+                    if attempt == max_retries:
+                        logger.error(
+                            f"Failed after {max_retries} retries: {e!s}"
+                        )
+                        raise
+
+                    logger.warning(
+                        f"Attempt {attempt + 1} failed: {e!s}. "
+                        f"Retrying in {delay:.1f}s..."
+                    )
+                    time.sleep(delay)
+                    delay *= 2  # Exponential backoff
+
+            raise last_exception
+
+        return wrapper
+
+    return decorator
+
+
+class BatchProcessor:
+    r"""Handles batch processing with dynamic sizing and error handling based
+    on system load.
+    """
+
+    def __init__(
+        self,
+        max_workers: Optional[int] = None,
+        initial_batch_size: Optional[int] = None,
+        monitoring_interval: float = 5.0,
+        cpu_threshold: float = 80.0,
+        memory_threshold: float = 85.0,
+    ):
+        r"""Initialize the BatchProcessor with dynamic worker allocation.
+
+        Args:
+            max_workers: Maximum number of workers. If None, will be
+                determined dynamically based on system resources.
+                (default: :obj:`None`)
+            initial_batch_size: Initial size of each batch. If `None`,
+                defaults to `10`. (default: :obj:`None`)
+            monitoring_interval: Interval in seconds between resource checks.
+                (default: :obj:`5.0`)
+            cpu_threshold: CPU usage percentage threshold for scaling down.
+                (default: :obj:`80.0`)
+            memory_threshold: Memory usage percentage threshold for scaling
+                down. (default: :obj:`85.0`)
+        """
+        import psutil
+
+        self.monitoring_interval = monitoring_interval
+        self.cpu_threshold = cpu_threshold
+        self.memory_threshold = memory_threshold
+        self.last_check_time = time.time()
+        self.psutil = psutil
+
+        # Initialize performance metrics
+        self.total_processed = 0
+        self.total_errors = 0
+        self.processing_times: List = []
+
+        if max_workers is None:
+            self.max_workers = self._calculate_optimal_workers()
+        else:
+            self.max_workers = max_workers
+
+        self.batch_size = (
+            10 if initial_batch_size is None else initial_batch_size
+        )
+        self.min_batch_size = 1
+        self.max_batch_size = 20
+        self.backoff_factor = 0.8
+        self.success_factor = 1.2
+
+        # Initial resource check
+        self._update_resource_metrics()
+
+    def _calculate_optimal_workers(self) -> int:
+        r"""Calculate optimal number of workers based on system resources."""
+        cpu_count = self.psutil.cpu_count()
+        cpu_percent = self.psutil.cpu_percent(interval=1)
+        memory = self.psutil.virtual_memory()
+
+        # Base number of workers on CPU count and current load
+        if cpu_percent > self.cpu_threshold:
+            workers = max(1, cpu_count // 4)
+        elif cpu_percent > 60:
+            workers = max(1, cpu_count // 2)
+        else:
+            workers = max(1, cpu_count - 1)
+
+        # Further reduce if memory is constrained
+        if memory.percent > self.memory_threshold:
+            workers = max(1, workers // 2)
+
+        return workers
+
+    def _update_resource_metrics(self) -> None:
+        r"""Update current resource usage metrics."""
+        self.current_cpu = self.psutil.cpu_percent()
+        self.current_memory = self.psutil.virtual_memory().percent
+        self.last_check_time = time.time()
+
+    def _should_check_resources(self) -> bool:
+        r"""Determine if it's time to check resource usage again."""
+        return time.time() - self.last_check_time >= self.monitoring_interval
+
+    def adjust_batch_size(
+        self, success: bool, processing_time: Optional[float] = None
+    ) -> None:
+        r"""Adjust batch size based on success/failure and system resources.
+
+        Args:
+            success (bool): Whether the last batch completed successfully
+            processing_time (Optional[float]): Time taken to process the last
+                batch. (default: :obj:`None`)
+        """
+        # Update metrics
+        self.total_processed += 1
+        if not success:
+            self.total_errors += 1
+        if processing_time is not None:
+            self.processing_times.append(processing_time)
+
+        # Check system resources if interval has elapsed
+        if self._should_check_resources():
+            self._update_resource_metrics()
+
+            # Adjust based on resource usage
+            if (
+                self.current_cpu > self.cpu_threshold
+                or self.current_memory > self.memory_threshold
+            ):
+                self.batch_size = max(
+                    int(self.batch_size * self.backoff_factor),
+                    self.min_batch_size,
+                )
+                self.max_workers = max(1, self.max_workers - 1)
+                return
+
+        # Adjust based on success/failure
+        if success:
+            self.batch_size = min(
+                int(self.batch_size * self.success_factor), self.max_batch_size
+            )
+        else:
+            self.batch_size = max(
+                int(self.batch_size * self.backoff_factor), self.min_batch_size
+            )
+
+    def get_performance_metrics(self) -> Dict[str, Any]:
+        r"""Get current performance metrics.
+
+        Returns:
+            Dict containing performance metrics including:
+            - total_processed: Total number of batches processed
+            - error_rate: Percentage of failed batches
+            - avg_processing_time: Average time per batch
+            - current_batch_size: Current batch size
+            - current_workers: Current number of workers
+            - current_cpu: Current CPU usage percentage
+            - current_memory: Current memory usage percentage
+        """
+        metrics = {
+            "total_processed": self.total_processed,
+            "error_rate": (self.total_errors / max(1, self.total_processed))
+            * 100,
+            "avg_processing_time": sum(self.processing_times)
+            / max(1, len(self.processing_times)),
+            "current_batch_size": self.batch_size,
+            "current_workers": self.max_workers,
+            "current_cpu": self.current_cpu,
+            "current_memory": self.current_memory,
+        }
+        return metrics
 
 
 def download_github_subdirectory(
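
Both new utilities are importable from camel.utils. A short composition sketch grounded in the code above (flaky_fetch and the items workload are placeholders; note that BatchProcessor imports psutil at construction time, so psutil must be installed):

import time

from camel.utils import BatchProcessor, retry_on_error


@retry_on_error(max_retries=2, initial_delay=0.5)
def flaky_fetch(url: str) -> str:
    # Placeholder for any transiently failing call; on failure it is
    # retried after 0.5s, then 1.0s, before the last exception propagates.
    ...


processor = BatchProcessor()  # workers and batch size derived from system load
items = list(range(100))  # placeholder workload
while items:
    batch = items[: processor.batch_size]
    items = items[processor.batch_size:]
    start = time.time()
    # ... process `batch` here, then report the outcome so sizing adapts ...
    processor.adjust_batch_size(success=True, processing_time=time.time() - start)
print(processor.get_performance_metrics())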
|