agentle 0.9.24__py3-none-any.whl → 0.9.26__py3-none-any.whl

@@ -43,6 +43,7 @@ from agentle.agents.whatsapp.providers.base.whatsapp_provider import WhatsAppPro
43
43
  from agentle.agents.whatsapp.providers.evolution.evolution_api_provider import (
44
44
  EvolutionAPIProvider,
45
45
  )
46
+ from agentle.agents.whatsapp.human_delay_calculator import HumanDelayCalculator
46
47
  from agentle.generations.models.message_parts.file import FilePart
47
48
  from agentle.generations.models.message_parts.text import TextPart
48
49
  from agentle.generations.models.message_parts.tool_execution_suggestion import (
@@ -140,8 +141,6 @@ class WhatsAppBot(BaseModel):
140
141
  file_storage_manager: FileStorageManager | None = Field(default=None)
141
142
  config: WhatsAppBotConfig = Field(default_factory=WhatsAppBotConfig)
142
143
 
143
- # REMOVED: context_manager field - no longer needed
144
-
145
144
  _running: bool = PrivateAttr(default=False)
146
145
  _webhook_handlers: MutableSequence[Callable[..., Any]] = PrivateAttr(
147
146
  default_factory=list
@@ -156,6 +155,7 @@ class WhatsAppBot(BaseModel):
156
155
  _response_callbacks: MutableSequence[CallbackWithContext] = PrivateAttr(
157
156
  default_factory=list
158
157
  )
158
+ _delay_calculator: HumanDelayCalculator | None = PrivateAttr(default=None)
159
159
 
160
160
  model_config = ConfigDict(arbitrary_types_allowed=True)
161
161
 
@@ -167,6 +167,55 @@ class WhatsAppBot(BaseModel):
167
167
  + "Please set agent.conversation_store before creating WhatsAppBot."
168
168
  )
169
169
 
170
+ # Log configuration validation
171
+ validation_issues = self.config.validate_config()
172
+ if validation_issues:
173
+ logger.warning(
174
+ f"[CONFIG_VALIDATION] Configuration has {len(validation_issues)} validation issue(s):"
175
+ )
176
+ for issue in validation_issues:
177
+ logger.warning(f"[CONFIG_VALIDATION] - {issue}")
178
+ else:
179
+ logger.info("[CONFIG_VALIDATION] Configuration validation passed")
180
+
181
+ # Initialize delay calculator if human delays are enabled
182
+ if self.config.enable_human_delays:
183
+ logger.info(
184
+ "[DELAY_CONFIG] ═══════════ HUMAN-LIKE DELAYS ENABLED ═══════════"
185
+ )
186
+ logger.info(
187
+ "[DELAY_CONFIG] Read delay bounds: "
188
+ + f"[{self.config.min_read_delay_seconds:.2f}s - {self.config.max_read_delay_seconds:.2f}s]"
189
+ )
190
+ logger.info(
191
+ "[DELAY_CONFIG] Typing delay bounds: "
192
+ + f"[{self.config.min_typing_delay_seconds:.2f}s - {self.config.max_typing_delay_seconds:.2f}s]"
193
+ )
194
+ logger.info(
195
+ "[DELAY_CONFIG] Send delay bounds: "
196
+ + f"[{self.config.min_send_delay_seconds:.2f}s - {self.config.max_send_delay_seconds:.2f}s]"
197
+ )
198
+ logger.info(
199
+ "[DELAY_CONFIG] Delay behavior settings: "
200
+ + f"jitter_enabled={self.config.enable_delay_jitter}, "
201
+ + f"show_typing={self.config.show_typing_during_delay}, "
202
+ + f"batch_compression={self.config.batch_read_compression_factor:.2f}"
203
+ )
204
+
205
+ # Initialize delay calculator
206
+ self._delay_calculator = HumanDelayCalculator(self.config)
207
+ logger.info("[DELAY_CONFIG] Delay calculator initialized successfully")
208
+ logger.info(
209
+ "[DELAY_CONFIG] ═══════════════════════════════════════════════"
210
+ )
211
+ else:
212
+ logger.info(
213
+ "[DELAY_CONFIG] Human-like delays disabled (enable_human_delays=False)"
214
+ )
215
+ logger.debug(
216
+ "[DELAY_CONFIG] To enable delays, set enable_human_delays=True in WhatsAppBotConfig"
217
+ )
218
+
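For reference, the delay settings logged above are plain fields on WhatsAppBotConfig, so enabling the behavior is a matter of constructing the config with delays turned on. A minimal sketch, using only field names that appear in this diff; the numeric values are illustrative, not the package defaults, and the WhatsAppBot constructor arguments are assumed:

    # Illustrative values only -- field names come from the [DELAY_CONFIG] log lines above.
    config = WhatsAppBotConfig(
        enable_human_delays=True,
        min_read_delay_seconds=1.0,
        max_read_delay_seconds=5.0,
        min_typing_delay_seconds=2.0,
        max_typing_delay_seconds=10.0,
        min_send_delay_seconds=0.5,
        max_send_delay_seconds=2.0,
        enable_delay_jitter=True,
        show_typing_during_delay=True,
        batch_read_compression_factor=0.7,
    )
    bot = WhatsAppBot(agent=agent, provider=provider, config=config)  # constructor args assumed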
170
219
  def start(self) -> None:
171
220
  """Start the WhatsApp bot."""
172
221
  run_sync(self.start_async)
@@ -227,7 +276,53 @@ class WhatsAppBot(BaseModel):
227
276
  ) -> GeneratedAssistantMessage[Any] | None:
228
277
  """
229
278
  Handle incoming WhatsApp message with enhanced error handling and batching.
279
+
280
+ This is the main entry point for processing incoming WhatsApp messages. It handles
281
+ rate limiting, spam protection, and message batching, and applies human-like delays
282
+ to simulate realistic behavior patterns.
283
+
284
+ Message Processing Flow:
285
+ 1. Retrieve or create user session
286
+ 2. Check rate limiting (if spam protection enabled)
287
+ 3. Apply read delay (if human delays enabled) - simulates reading time
288
+ 4. Mark message as read (if auto_read_messages enabled)
289
+ 5. Send welcome message (if first interaction)
290
+ 6. Process message (with batching if enabled) or immediately
291
+ 7. Return generated response
292
+
293
+ Human-Like Delays:
294
+ When enable_human_delays is True, this method applies a read delay before
295
+ marking the message as read. The delay simulates the time a human would take
296
+ to read and comprehend the incoming message, creating a realistic gap between
297
+ message receipt and read receipt.
298
+
299
+ For batched messages, a batch read delay is applied instead, which accounts
300
+ for reading multiple messages in sequence with compression for faster batch
301
+ reading.
302
+
303
+ Args:
304
+ message: The incoming WhatsApp message to process.
305
+ chat_id: Optional custom chat identifier for conversation tracking.
306
+ If not provided, uses the sender's phone number.
307
+
308
+ Returns:
309
+ Generated assistant response message, or None if processing failed or
310
+ was rate limited.
311
+
312
+ Raises:
313
+ Exceptions are caught and logged. User-facing errors trigger error messages.
314
+
315
+ Example:
316
+ >>> message = WhatsAppTextMessage(
317
+ ... from_number="1234567890",
318
+ ... text="Hello!",
319
+ ... id="msg_123"
320
+ ... )
321
+ >>> response = await bot.handle_message(message)
322
+ >>> if response:
323
+ ... print(f"Response: {response.text}")
230
324
  """
325
+
231
326
  logger.info("[MESSAGE_HANDLER] ═══════════ MESSAGE HANDLER ENTRY ═══════════")
232
327
  logger.info(
233
328
  f"[MESSAGE_HANDLER] Received message from {message.from_number}: ID={message.id}, Type={type(message).__name__}"
@@ -274,6 +369,9 @@ class WhatsAppBot(BaseModel):
274
369
  await self.provider.update_session(session)
275
370
  return None
276
371
 
372
+ # Apply read delay before marking message as read (simulates human reading time)
373
+ await self._apply_read_delay(message)
374
+
277
375
  # Mark as read if configured (only after rate limiting check passes)
278
376
  if self.config.auto_read_messages:
279
377
  logger.debug(f"[MESSAGE_HANDLER] Marking message {message.id} as read")
@@ -1205,7 +1303,54 @@ class WhatsAppBot(BaseModel):
1205
1303
  async def _process_message_batch(
1206
1304
  self, phone_number: PhoneNumber, session: WhatsAppSession, processing_token: str
1207
1305
  ) -> GeneratedAssistantMessage[Any] | None:
1208
- """Process a batch of messages for a user with enhanced timeout protection."""
1306
+ """Process a batch of messages for a user with enhanced timeout protection.
1307
+
1308
+ This method processes multiple messages that were received in quick succession
1309
+ as a single batch. It applies batch-specific delays and combines all messages
1310
+ into a single conversation context for more coherent responses.
1311
+
1312
+ Batch Processing Flow:
1313
+ 1. Validate pending messages exist
1314
+ 2. Mark session as sending to prevent cleanup
1315
+ 3. Apply batch read delay (if human delays enabled) - simulates reading all messages
1316
+ 4. Convert message batch to agent input
1317
+ 5. Generate single response for entire batch
1318
+ 6. Send response to user
1319
+ 7. Mark all messages as read
1320
+ 8. Update session state
1321
+ 9. Execute response callbacks
1322
+
1323
+ Human-Like Delays:
1324
+ When enable_human_delays is True, this method applies a batch read delay
1325
+ at the start of processing. The delay simulates the time a human would take
1326
+ to read multiple messages in sequence, accounting for:
1327
+ - Individual reading time for each message
1328
+ - Brief pauses between messages (0.5s each)
1329
+ - Compression factor (default 0.7x) for faster batch reading
1330
+
1331
+ This creates a realistic gap before the batch is processed, making the bot
1332
+ appear more human-like when handling rapid message sequences.
1333
+
1334
+ Args:
1335
+ phone_number: Phone number of the user whose messages are being processed.
1336
+ session: The user's WhatsApp session containing pending messages.
1337
+ processing_token: Unique token to prevent duplicate batch processing.
1338
+
1339
+ Returns:
1340
+ Generated assistant response for the batch, or None if processing failed
1341
+ or no messages were pending.
1342
+
1343
+ Raises:
1344
+ Exceptions are caught and logged. Session state is cleaned up on errors.
1345
+
1346
+ Example:
1347
+ >>> # Called automatically by batch processor task
1348
+ >>> response = await self._process_message_batch(
1349
+ ... phone_number="1234567890",
1350
+ ... session=session,
1351
+ ... processing_token="batch_123"
1352
+ ... )
1353
+ """
1209
1354
  logger.info("[BATCH_PROCESSING] ═══════════ BATCH PROCESSING START ═══════════")
1210
1355
  logger.info(
1211
1356
  f"[BATCH_PROCESSING] Phone: {phone_number}, Token: {processing_token}"
@@ -1248,6 +1393,9 @@ class WhatsAppBot(BaseModel):
1248
1393
  f"[BATCH_PROCESSING] 📦 Processing batch of {len(pending_messages)} messages for {phone_number}"
1249
1394
  )
1250
1395
 
1396
+ # Apply batch read delay before processing (simulates human reading multiple messages)
1397
+ await self._apply_batch_read_delay(list(pending_messages))
1398
+
1251
1399
  # Convert message batch to agent input
1252
1400
  logger.debug(
1253
1401
  f"[BATCH_PROCESSING] Converting message batch to agent input for {phone_number}"
@@ -2062,7 +2210,51 @@ class WhatsAppBot(BaseModel):
2062
2210
  response: GeneratedAssistantMessage[Any] | str,
2063
2211
  reply_to: str | None = None,
2064
2212
  ) -> None:
2065
- """Send response message(s) to user with enhanced error handling and retry logic."""
2213
+ """Send response message(s) to user with enhanced error handling and retry logic.
2214
+
2215
+ This method handles the complete response sending flow including text-to-speech,
2216
+ human-like delays, typing indicators, message splitting, and error handling.
2217
+
2218
+ Response Sending Flow:
2219
+ 1. Extract and format response text
2220
+ 2. Attempt TTS audio generation (if configured and chance succeeds)
2221
+ 3. Apply typing delay (if human delays enabled and TTS not sent)
2222
+ 4. Show typing indicator (if configured and not already shown during delay)
2223
+ 5. Split long messages if needed
2224
+ 6. Send each message part with send delay between parts
2225
+ 7. Handle errors with retry logic
2226
+
2227
+ Human-Like Delays:
2228
+ When enable_human_delays is True, this method applies two types of delays:
2229
+
2230
+ 1. Typing Delay: Applied before sending the response to simulate the time
2231
+ a human would take to compose and type the message. The delay is based
2232
+ on response length and includes composition planning time.
2233
+
2234
+ 2. Send Delay: Applied immediately before each message transmission to
2235
+ simulate the brief final review time before hitting send. This delay
2236
+ is applied to each message part independently.
2237
+
2238
+ If TTS audio is successfully sent, the typing delay is skipped since the
2239
+ audio generation time already provides a natural delay.
2240
+
2241
+ Args:
2242
+ to: Phone number of the recipient.
2243
+ response: The response to send. Can be a GeneratedAssistantMessage or string.
2244
+ reply_to: Optional message ID to reply to (for message quoting).
2245
+
2246
+ Raises:
2247
+ Exceptions are caught and logged. Failed messages trigger retry logic
2248
+ if configured.
2249
+
2250
+ Example:
2251
+ >>> response = GeneratedAssistantMessage(text="Hello! How can I help?")
2252
+ >>> await self._send_response(
2253
+ ... to="1234567890",
2254
+ ... response=response,
2255
+ ... reply_to="msg_123"
2256
+ ... )
2257
+ """
2066
2258
  # Extract text from GeneratedAssistantMessage if needed
2067
2259
  response_text = (
2068
2260
  response.text
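The typing and send delays this docstring refers to come from HumanDelayCalculator.calculate_typing_delay and calculate_send_delay, which are invoked further down in _apply_typing_delay and _apply_send_delay. Minimal sketches of the behavior described (assumed typing rate and planning overhead; not the package's actual formulas):

    import random

    def sketch_typing_delay(response_text: str, cfg) -> float:
        typing_rate_cps = 15.0                           # assumed typing speed, chars/second
        planning_overhead = 1.0                          # assumed composition-planning time, s
        delay = planning_overhead + len(response_text) / typing_rate_cps
        return min(max(delay, cfg.min_typing_delay_seconds), cfg.max_typing_delay_seconds)

    def sketch_send_delay(cfg) -> float:
        # A short "final review" pause drawn from the configured send-delay bounds.
        return random.uniform(cfg.min_send_delay_seconds, cfg.max_send_delay_seconds)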
@@ -2077,6 +2269,9 @@ class WhatsAppBot(BaseModel):
2077
2269
  f"[SEND_RESPONSE] Sending response to {to} (length: {len(response_text)}, reply_to: {reply_to})"
2078
2270
  )
2079
2271
 
2272
+ # Track if TTS was successfully sent (to skip typing delay for audio)
2273
+ tts_sent_successfully = False
2274
+
2080
2275
  # Check if we should send audio via TTS
2081
2276
  should_attempt_tts = (
2082
2277
  self.tts_provider
@@ -2193,7 +2388,11 @@ class WhatsAppBot(BaseModel):
2193
2388
  "format": str(speech_result.format),
2194
2389
  },
2195
2390
  )
2196
- # Audio sent successfully, return early
2391
+ # Audio sent successfully, mark flag and return early
2392
+ tts_sent_successfully = True
2393
+ logger.info(
2394
+ "[TTS] Skipping typing delay since TTS audio was sent successfully"
2395
+ )
2197
2396
  return
2198
2397
 
2199
2398
  except Exception as e:
@@ -2225,9 +2424,34 @@ class WhatsAppBot(BaseModel):
2225
2424
  messages = self._split_message_by_line_breaks(response_text)
2226
2425
  logger.info(f"[SEND_RESPONSE] Split response into {len(messages)} parts")
2227
2426
 
2427
+ # Apply typing delay before sending messages (simulates human typing time)
2428
+ # This should be done before the typing indicator to coordinate properly
2429
+ # Note: This is only reached if TTS was not used or if TTS failed and fell back to text
2430
+ if should_attempt_tts and not tts_sent_successfully:
2431
+ logger.info(
2432
+ "[SEND_RESPONSE] TTS failed, applying typing delay for text fallback"
2433
+ )
2434
+ await self._apply_typing_delay(response_text, to)
2435
+
2228
2436
  # Show typing indicator ONCE before sending all messages
2229
2437
  # Only send typing indicator if we're not attempting TTS or if TTS failed
2230
- if self.config.typing_indicator and not should_attempt_tts:
2438
+ # Skip if typing delay already handled the indicator
2439
+ typing_delay_handled_indicator = (
2440
+ self.config.enable_human_delays
2441
+ and self.config.show_typing_during_delay
2442
+ and self.config.typing_indicator
2443
+ )
2444
+
2445
+ if typing_delay_handled_indicator:
2446
+ logger.debug(
2447
+ "[SEND_RESPONSE] Skipping redundant typing indicator - already sent during typing delay"
2448
+ )
2449
+
2450
+ if (
2451
+ self.config.typing_indicator
2452
+ and not should_attempt_tts
2453
+ and not typing_delay_handled_indicator
2454
+ ):
2231
2455
  try:
2232
2456
  logger.debug(
2233
2457
  f"[SEND_RESPONSE] Sending typing indicator to {to} before sending {len(messages)} message(s)"
@@ -2238,8 +2462,13 @@ class WhatsAppBot(BaseModel):
2238
2462
  except Exception as e:
2239
2463
  # Don't let typing indicator failures break message sending
2240
2464
  logger.warning(f"[SEND_RESPONSE] Failed to send typing indicator: {e}")
2241
- elif self.config.typing_indicator and should_attempt_tts:
2465
+ elif (
2466
+ self.config.typing_indicator
2467
+ and should_attempt_tts
2468
+ and not typing_delay_handled_indicator
2469
+ ):
2242
2470
  # TTS was attempted but failed, send typing indicator for text fallback
2471
+ # Skip if typing delay already handled the indicator
2243
2472
  try:
2244
2473
  logger.debug(
2245
2474
  f"[SEND_RESPONSE] TTS failed, sending typing indicator to {to} for text fallback"
@@ -2270,6 +2499,9 @@ class WhatsAppBot(BaseModel):
2270
2499
 
2271
2500
  for attempt in range(max_retries + 1):
2272
2501
  try:
2502
+ # Apply send delay before transmitting message (simulates final review)
2503
+ await self._apply_send_delay()
2504
+
2273
2505
  sent_message = await self.provider.send_text_message(
2274
2506
  to=to, text=msg, quoted_message_id=quoted_id
2275
2507
  )
@@ -2310,15 +2542,30 @@ class WhatsAppBot(BaseModel):
2310
2542
  # Delay between messages (respecting typing duration + small buffer)
2311
2543
  if i < len(messages) - 1:
2312
2544
  # Use typing duration if typing indicator is enabled, otherwise use a small delay
2313
- delay = (
2545
+ inter_message_delay = (
2314
2546
  self.config.typing_duration + 0.5
2315
2547
  if self.config.typing_indicator
2316
2548
  else 1.0
2317
2549
  )
2318
- logger.debug(
2319
- f"[SEND_RESPONSE] Waiting {delay}s before sending next message part"
2320
- )
2321
- await asyncio.sleep(delay)
2550
+
2551
+ # Calculate total delay including send delay if human delays are enabled
2552
+ if self.config.enable_human_delays and self._delay_calculator:
2553
+ # Send delay will be applied before next message, so log total expected delay
2554
+ estimated_send_delay = (
2555
+ self.config.min_send_delay_seconds
2556
+ + self.config.max_send_delay_seconds
2557
+ ) / 2
2558
+ total_delay = inter_message_delay + estimated_send_delay
2559
+ logger.debug(
2560
+ f"[SEND_RESPONSE] Inter-message delay: {inter_message_delay:.2f}s "
2561
+ + f"(+ ~{estimated_send_delay:.2f}s send delay = ~{total_delay:.2f}s total)"
2562
+ )
2563
+ else:
2564
+ logger.debug(
2565
+ f"[SEND_RESPONSE] Waiting {inter_message_delay}s before sending next message part"
2566
+ )
2567
+
2568
+ await asyncio.sleep(inter_message_delay)
2322
2569
 
2323
2570
  # Log final sending results
2324
2571
  if failed_parts:
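As a worked example with hypothetical settings: typing_indicator=True and typing_duration=3.0 give inter_message_delay = 3.5 s, and with min_send_delay_seconds=0.5 and max_send_delay_seconds=2.0 the estimated send delay is (0.5 + 2.0) / 2 = 1.25 s, so the log reports roughly 4.75 s total between parts. The actual send delay is drawn at random within the bounds in _apply_send_delay, so the real gap varies around that estimate.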
@@ -2713,6 +2960,306 @@ class WhatsAppBot(BaseModel):
2713
2960
  f"[RATE_LIMIT_ERROR] Failed to send rate limit message to {to}: {e}"
2714
2961
  )
2715
2962
 
2963
+ async def _apply_read_delay(self, message: WhatsAppMessage) -> None:
2964
+ """Apply human-like read delay before marking message as read.
2965
+
2966
+ This method simulates the time a human would take to read and comprehend
2967
+ an incoming message. The delay is calculated based on message content length
2968
+ and includes reading time, context switching, and comprehension time.
2969
+
2970
+ The delay is applied BEFORE marking the message as read, creating a realistic
2971
+ gap between message receipt and read receipt that matches human behavior.
2972
+
2973
+ Behavior:
2974
+ - Skips delay if enable_human_delays is False
2975
+ - Extracts text content from message (text or media caption)
2976
+ - Calculates delay using HumanDelayCalculator
2977
+ - Applies delay using asyncio.sleep (non-blocking)
2978
+ - Logs delay start and completion
2979
+ - Handles cancellation and errors gracefully
2980
+
2981
+ Args:
2982
+ message: The WhatsApp message to process. Can be text or media message.
2983
+
2984
+ Raises:
2985
+ asyncio.CancelledError: Re-raised to allow proper task cancellation.
2986
+ Other exceptions are caught and logged, processing continues without delay.
2987
+
2988
+ Example:
2989
+ >>> # Called automatically in handle_message() before marking as read
2990
+ >>> await self._apply_read_delay(message)
2991
+ >>> await self.provider.mark_message_as_read(message.id)
2992
+ """
2993
+ if not self.config.enable_human_delays or not self._delay_calculator:
2994
+ logger.debug("[HUMAN_DELAY] ⏱️ Read delay skipped (delays disabled)")
2995
+ return
2996
+
2997
+ try:
2998
+ # Extract text content from message
2999
+ text_content = ""
3000
+ message_type = type(message).__name__
3001
+ if isinstance(message, WhatsAppTextMessage):
3002
+ text_content = message.text
3003
+ elif isinstance(message, WhatsAppMediaMessage):
3004
+ # For media messages, use caption if available
3005
+ text_content = message.caption or ""
3006
+
3007
+ # Calculate read delay
3008
+ delay = self._delay_calculator.calculate_read_delay(text_content)
3009
+
3010
+ # Log delay start
3011
+ logger.info(
3012
+ f"[HUMAN_DELAY] ⏱️ Starting read delay: {delay:.2f}s "
3013
+ + f"for {len(text_content)} chars (message_type={message_type}, message_id={message.id})"
3014
+ )
3015
+
3016
+ # Apply delay
3017
+ await asyncio.sleep(delay)
3018
+
3019
+ # Log delay completion
3020
+ logger.info(
3021
+ f"[HUMAN_DELAY] ⏱️ Read delay completed: {delay:.2f}s "
3022
+ + f"(message_id={message.id})"
3023
+ )
3024
+
3025
+ except asyncio.CancelledError:
3026
+ logger.warning(
3027
+ f"[HUMAN_DELAY] ⏱️ Read delay cancelled for message {message.id}"
3028
+ )
3029
+ raise # Re-raise to allow proper cancellation
3030
+ except Exception as e:
3031
+ logger.error(
3032
+ f"[HUMAN_DELAY] ⏱️ Error applying read delay for message {message.id}: {e}",
3033
+ exc_info=True,
3034
+ )
3035
+ # Continue without delay on error
3036
+
3037
+ async def _apply_typing_delay(self, response_text: str, to: PhoneNumber) -> None:
3038
+ """Apply human-like typing delay before sending response.
3039
+
3040
+ This method simulates the time a human would take to compose and type
3041
+ a response. The delay is calculated based on response content length
3042
+ and includes composition planning, typing time, and multitasking overhead.
3043
+
3044
+ The delay is applied AFTER response generation but BEFORE sending the message,
3045
+ creating a realistic gap that matches human typing behavior.
3046
+
3047
+ Behavior:
3048
+ - Skips delay if enable_human_delays is False
3049
+ - Calculates delay using HumanDelayCalculator based on response length
3050
+ - Optionally sends typing indicator during delay (if show_typing_during_delay is True)
3051
+ - Applies delay using asyncio.sleep (non-blocking)
3052
+ - Logs delay start and completion
3053
+ - Handles typing indicator failures gracefully
3054
+ - Handles cancellation and errors gracefully
3055
+
3056
+ Args:
3057
+ response_text: The response text that will be sent to the user.
3058
+ to: The phone number of the recipient.
3059
+
3060
+ Raises:
3061
+ asyncio.CancelledError: Re-raised to allow proper task cancellation.
3062
+ Other exceptions are caught and logged, processing continues without delay.
3063
+
3064
+ Example:
3065
+ >>> # Called automatically in _send_response() before sending
3066
+ >>> response_text = "Hello! How can I help you?"
3067
+ >>> await self._apply_typing_delay(response_text, phone_number)
3068
+ >>> await self.provider.send_text_message(phone_number, response_text)
3069
+ """
3070
+ if not self.config.enable_human_delays or not self._delay_calculator:
3071
+ logger.debug("[HUMAN_DELAY] ⌨️ Typing delay skipped (delays disabled)")
3072
+ return
3073
+
3074
+ try:
3075
+ # Calculate typing delay
3076
+ delay = self._delay_calculator.calculate_typing_delay(response_text)
3077
+
3078
+ # Log delay start
3079
+ logger.info(
3080
+ f"[HUMAN_DELAY] ⌨️ Starting typing delay: {delay:.2f}s "
3081
+ + f"for {len(response_text)} chars (to={to})"
3082
+ )
3083
+
3084
+ # Show typing indicator during delay if configured
3085
+ if self.config.show_typing_during_delay and self.config.typing_indicator:
3086
+ try:
3087
+ logger.debug(
3088
+ f"[HUMAN_DELAY] ⌨️ Sending typing indicator for {int(delay)}s to {to}"
3089
+ )
3090
+ # Send typing indicator for the duration of the delay
3091
+ await self.provider.send_typing_indicator(to, int(delay))
3092
+ except Exception as indicator_error:
3093
+ logger.warning(
3094
+ f"[HUMAN_DELAY] ⌨️ Failed to send typing indicator during delay to {to}: "
3095
+ + f"{indicator_error}"
3096
+ )
3097
+ # Continue with delay even if indicator fails
3098
+
3099
+ # Apply delay
3100
+ await asyncio.sleep(delay)
3101
+
3102
+ # Log delay completion
3103
+ logger.info(
3104
+ f"[HUMAN_DELAY] ⌨️ Typing delay completed: {delay:.2f}s (to={to})"
3105
+ )
3106
+
3107
+ except asyncio.CancelledError:
3108
+ logger.warning(f"[HUMAN_DELAY] ⌨️ Typing delay cancelled for {to}")
3109
+ raise # Re-raise to allow proper cancellation
3110
+ except Exception as e:
3111
+ logger.error(
3112
+ f"[HUMAN_DELAY] ⌨️ Error applying typing delay for {to}: {e}",
3113
+ exc_info=True,
3114
+ )
3115
+ # Continue without delay on error
3116
+
3117
+ async def _apply_send_delay(self) -> None:
3118
+ """Apply brief delay before sending message.
3119
+
3120
+ This method simulates the final review time before a human sends a message.
3121
+ The delay is a random value within configured bounds, representing the brief
3122
+ moment a human takes to review their message before hitting send.
3123
+
3124
+ The delay is applied immediately BEFORE each message transmission, creating
3125
+ a small gap that adds to the natural feel of the conversation.
3126
+
3127
+ Behavior:
3128
+ - Skips delay if enable_human_delays is False
3129
+ - Generates random delay within configured send delay bounds
3130
+ - Applies optional jitter if enabled
3131
+ - Applies delay using asyncio.sleep (non-blocking)
3132
+ - Logs delay start and completion
3133
+ - Handles cancellation and errors gracefully
3134
+
3135
+ Raises:
3136
+ asyncio.CancelledError: Re-raised to allow proper task cancellation.
3137
+ Other exceptions are caught and logged, processing continues without delay.
3138
+
3139
+ Example:
3140
+ >>> # Called automatically before each message transmission
3141
+ >>> for message_part in message_parts:
3142
+ ... await self._apply_send_delay()
3143
+ ... await self.provider.send_text_message(phone_number, message_part)
3144
+ """
3145
+ if not self.config.enable_human_delays or not self._delay_calculator:
3146
+ logger.debug("[HUMAN_DELAY] 📤 Send delay skipped (delays disabled)")
3147
+ return
3148
+
3149
+ try:
3150
+ # Calculate send delay
3151
+ delay = self._delay_calculator.calculate_send_delay()
3152
+
3153
+ # Log delay start
3154
+ logger.info(f"[HUMAN_DELAY] 📤 Starting send delay: {delay:.2f}s")
3155
+
3156
+ # Apply delay
3157
+ await asyncio.sleep(delay)
3158
+
3159
+ # Log delay completion
3160
+ logger.debug(f"[HUMAN_DELAY] 📤 Send delay completed: {delay:.2f}s")
3161
+
3162
+ except asyncio.CancelledError:
3163
+ logger.warning("[HUMAN_DELAY] 📤 Send delay cancelled")
3164
+ raise # Re-raise to allow proper cancellation
3165
+ except Exception as e:
3166
+ logger.error(
3167
+ f"[HUMAN_DELAY] 📤 Error applying send delay: {e}", exc_info=True
3168
+ )
3169
+ # Continue without delay on error
3170
+
3171
+ async def _apply_batch_read_delay(self, messages: list[dict[str, Any]]) -> None:
3172
+ """Apply human-like read delay for batch of messages.
3173
+
3174
+ This method simulates the time a human would take to read multiple messages
3175
+ in sequence. The delay accounts for reading each message individually, with
3176
+ brief pauses between messages, and applies a compression factor to simulate
3177
+ faster batch reading compared to reading messages one at a time.
3178
+
3179
+ The delay is applied at the START of batch processing, before any message
3180
+ processing begins, creating a realistic gap that matches human batch reading.
3181
+
3182
+ Behavior:
3183
+ - Skips delay if enable_human_delays is False
3184
+ - Extracts text content from all messages (text and media captions)
3185
+ - Calculates individual read delays for each message
3186
+ - Adds 0.5s pause between each message
3187
+ - Applies compression factor (default 0.7x for 30% faster reading)
3188
+ - Clamps to reasonable bounds (2-20 seconds suggested)
3189
+ - Applies delay using asyncio.sleep (non-blocking)
3190
+ - Logs delay start and completion with message count
3191
+ - Handles cancellation and errors gracefully
3192
+
3193
+ Args:
3194
+ messages: List of message dictionaries from the batch. Each dict should
3195
+ contain 'type' and either 'text' or 'caption' fields.
3196
+
3197
+ Raises:
3198
+ asyncio.CancelledError: Re-raised to allow proper task cancellation.
3199
+ Other exceptions are caught and logged, processing continues without delay.
3200
+
3201
+ Example:
3202
+ >>> # Called automatically in _process_message_batch() before processing
3203
+ >>> pending_messages = [msg1_dict, msg2_dict, msg3_dict]
3204
+ >>> await self._apply_batch_read_delay(pending_messages)
3205
+ >>> # Now process the batch...
3206
+ """
3207
+ if not self.config.enable_human_delays or not self._delay_calculator:
3208
+ logger.debug("[HUMAN_DELAY] 📚 Batch read delay skipped (delays disabled)")
3209
+ return
3210
+
3211
+ try:
3212
+ # Extract text content from all messages in batch
3213
+ message_texts: list[str] = []
3214
+ total_chars = 0
3215
+ for msg in messages:
3216
+ if msg.get("type") == "WhatsAppTextMessage":
3217
+ text = msg.get("text", "")
3218
+ if text:
3219
+ message_texts.append(text)
3220
+ total_chars += len(text)
3221
+ elif msg.get("type") in [
3222
+ "WhatsAppImageMessage",
3223
+ "WhatsAppDocumentMessage",
3224
+ "WhatsAppAudioMessage",
3225
+ "WhatsAppVideoMessage",
3226
+ ]:
3227
+ # For media messages, use caption if available
3228
+ caption = msg.get("caption", "")
3229
+ if caption:
3230
+ message_texts.append(caption)
3231
+ total_chars += len(caption)
3232
+
3233
+ # Calculate batch read delay
3234
+ delay = self._delay_calculator.calculate_batch_read_delay(message_texts)
3235
+
3236
+ # Log delay start
3237
+ logger.info(
3238
+ f"[HUMAN_DELAY] 📚 Starting batch read delay: {delay:.2f}s "
3239
+ + f"for {len(messages)} messages ({total_chars} total chars)"
3240
+ )
3241
+
3242
+ # Apply delay
3243
+ await asyncio.sleep(delay)
3244
+
3245
+ # Log delay completion
3246
+ logger.info(
3247
+ f"[HUMAN_DELAY] 📚 Batch read delay completed: {delay:.2f}s "
3248
+ + f"for {len(messages)} messages"
3249
+ )
3250
+
3251
+ except asyncio.CancelledError:
3252
+ logger.warning(
3253
+ f"[HUMAN_DELAY] 📚 Batch read delay cancelled for {len(messages)} messages"
3254
+ )
3255
+ raise # Re-raise to allow proper cancellation
3256
+ except Exception as e:
3257
+ logger.error(
3258
+ f"[HUMAN_DELAY] 📚 Error applying batch read delay for {len(messages)} messages: {e}",
3259
+ exc_info=True,
3260
+ )
3261
+ # Continue without delay on error
3262
+
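For context, _apply_batch_read_delay above treats pending messages as plain dicts and only consults the 'type', 'text', and 'caption' keys. An illustrative batch (any other fields stored on the session are omitted here):

    pending_messages = [
        {"type": "WhatsAppTextMessage", "text": "Hi, are you open today?"},
        {"type": "WhatsAppTextMessage", "text": "Also, do you deliver?"},
        {"type": "WhatsAppImageMessage", "caption": "This is the item I mean"},
    ]
    # await self._apply_batch_read_delay(pending_messages)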
2716
3263
  def _split_message(self, text: str) -> Sequence[str]:
2717
3264
  """Split long message into chunks."""
2718
3265
  if len(text) <= self.config.max_message_length: