ripperdoc 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +257 -123
- ripperdoc/cli/commands/__init__.py +2 -1
- ripperdoc/cli/commands/agents_cmd.py +138 -8
- ripperdoc/cli/commands/clear_cmd.py +9 -4
- ripperdoc/cli/commands/config_cmd.py +1 -1
- ripperdoc/cli/commands/context_cmd.py +3 -2
- ripperdoc/cli/commands/doctor_cmd.py +18 -4
- ripperdoc/cli/commands/exit_cmd.py +1 -0
- ripperdoc/cli/commands/hooks_cmd.py +27 -53
- ripperdoc/cli/commands/models_cmd.py +27 -10
- ripperdoc/cli/commands/permissions_cmd.py +27 -9
- ripperdoc/cli/commands/resume_cmd.py +9 -3
- ripperdoc/cli/commands/stats_cmd.py +244 -0
- ripperdoc/cli/commands/status_cmd.py +4 -4
- ripperdoc/cli/commands/tasks_cmd.py +8 -4
- ripperdoc/cli/ui/file_mention_completer.py +2 -1
- ripperdoc/cli/ui/interrupt_handler.py +2 -3
- ripperdoc/cli/ui/message_display.py +4 -2
- ripperdoc/cli/ui/panels.py +1 -0
- ripperdoc/cli/ui/provider_options.py +247 -0
- ripperdoc/cli/ui/rich_ui.py +403 -81
- ripperdoc/cli/ui/spinner.py +54 -18
- ripperdoc/cli/ui/thinking_spinner.py +1 -2
- ripperdoc/cli/ui/tool_renderers.py +8 -2
- ripperdoc/cli/ui/wizard.py +213 -0
- ripperdoc/core/agents.py +19 -6
- ripperdoc/core/config.py +51 -17
- ripperdoc/core/custom_commands.py +7 -6
- ripperdoc/core/default_tools.py +101 -12
- ripperdoc/core/hooks/config.py +1 -3
- ripperdoc/core/hooks/events.py +27 -28
- ripperdoc/core/hooks/executor.py +4 -6
- ripperdoc/core/hooks/integration.py +12 -21
- ripperdoc/core/hooks/llm_callback.py +59 -0
- ripperdoc/core/hooks/manager.py +40 -15
- ripperdoc/core/permissions.py +118 -12
- ripperdoc/core/providers/anthropic.py +109 -36
- ripperdoc/core/providers/gemini.py +70 -5
- ripperdoc/core/providers/openai.py +89 -24
- ripperdoc/core/query.py +273 -68
- ripperdoc/core/query_utils.py +2 -0
- ripperdoc/core/skills.py +9 -3
- ripperdoc/core/system_prompt.py +4 -2
- ripperdoc/core/tool.py +17 -8
- ripperdoc/sdk/client.py +79 -4
- ripperdoc/tools/ask_user_question_tool.py +5 -3
- ripperdoc/tools/background_shell.py +307 -135
- ripperdoc/tools/bash_output_tool.py +1 -1
- ripperdoc/tools/bash_tool.py +63 -24
- ripperdoc/tools/dynamic_mcp_tool.py +29 -8
- ripperdoc/tools/enter_plan_mode_tool.py +1 -1
- ripperdoc/tools/exit_plan_mode_tool.py +1 -1
- ripperdoc/tools/file_edit_tool.py +167 -54
- ripperdoc/tools/file_read_tool.py +28 -4
- ripperdoc/tools/file_write_tool.py +13 -10
- ripperdoc/tools/glob_tool.py +3 -2
- ripperdoc/tools/grep_tool.py +3 -2
- ripperdoc/tools/kill_bash_tool.py +1 -1
- ripperdoc/tools/ls_tool.py +1 -1
- ripperdoc/tools/lsp_tool.py +615 -0
- ripperdoc/tools/mcp_tools.py +13 -10
- ripperdoc/tools/multi_edit_tool.py +8 -7
- ripperdoc/tools/notebook_edit_tool.py +7 -4
- ripperdoc/tools/skill_tool.py +1 -1
- ripperdoc/tools/task_tool.py +519 -69
- ripperdoc/tools/todo_tool.py +2 -2
- ripperdoc/tools/tool_search_tool.py +3 -2
- ripperdoc/utils/conversation_compaction.py +9 -5
- ripperdoc/utils/file_watch.py +214 -5
- ripperdoc/utils/json_utils.py +2 -1
- ripperdoc/utils/lsp.py +806 -0
- ripperdoc/utils/mcp.py +11 -3
- ripperdoc/utils/memory.py +4 -2
- ripperdoc/utils/message_compaction.py +21 -7
- ripperdoc/utils/message_formatting.py +14 -7
- ripperdoc/utils/messages.py +126 -67
- ripperdoc/utils/path_ignore.py +35 -8
- ripperdoc/utils/permissions/path_validation_utils.py +2 -1
- ripperdoc/utils/permissions/shell_command_validation.py +427 -91
- ripperdoc/utils/permissions/tool_permission_utils.py +174 -15
- ripperdoc/utils/safe_get_cwd.py +2 -1
- ripperdoc/utils/session_heatmap.py +244 -0
- ripperdoc/utils/session_history.py +13 -6
- ripperdoc/utils/session_stats.py +293 -0
- ripperdoc/utils/todo.py +2 -1
- ripperdoc/utils/token_estimation.py +6 -1
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/METADATA +8 -2
- ripperdoc-0.2.10.dist-info/RECORD +129 -0
- ripperdoc-0.2.8.dist-info/RECORD +0 -121
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.8.dist-info → ripperdoc-0.2.10.dist-info}/top_level.txt +0 -0
```diff
--- a/ripperdoc/core/providers/gemini.py
+++ b/ripperdoc/core/providers/gemini.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 import asyncio
 import copy
 import inspect
+import json
 import os
 import time
 from typing import Any, AsyncIterator, Dict, List, Optional, Tuple, cast
```
```diff
@@ -240,9 +241,7 @@ async def _async_build_tool_declarations(tools: List[Tool[Any, Any]]) -> List[Di
                 description=description,
                 parameters_json_schema=parameters_schema,
             )
-            declarations.append(
-                func_decl.model_dump(mode="json", exclude_none=True)
-            )
+            declarations.append(func_decl.model_dump(mode="json", exclude_none=True))
         else:
             declarations.append(
                 {
```
```diff
@@ -385,6 +384,17 @@ class GeminiClient(ProviderClient):
     ) -> ProviderResponse:
         start_time = time.time()
 
+        logger.debug(
+            "[gemini_client] Preparing request",
+            extra={
+                "model": model_profile.model,
+                "tool_mode": tool_mode,
+                "stream": stream,
+                "max_thinking_tokens": max_thinking_tokens,
+                "num_tools": len(tools),
+            },
+        )
+
         try:
             client = await self._client(model_profile)
         except asyncio.CancelledError:
```
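These `logger.debug(..., extra={...})` calls use the standard-library `logging` contract: keys in `extra` become attributes on the emitted `LogRecord`, and they only show up in output if the active formatter surfaces them. A minimal sketch of such a formatter (illustrative only, not ripperdoc's actual logging setup):

```python
import logging

class ExtraFormatter(logging.Formatter):
    """Append any extra= fields after the formatted message."""

    # Attributes present on every LogRecord; anything else arrived via extra=.
    BASE = set(logging.LogRecord("", 0, "", 0, "", (), None).__dict__) | {"message", "asctime"}

    def format(self, record: logging.LogRecord) -> str:
        base = super().format(record)
        extras = {k: v for k, v in record.__dict__.items() if k not in self.BASE}
        return f"{base} {extras}" if extras else base

handler = logging.StreamHandler()
handler.setFormatter(ExtraFormatter("%(levelname)s %(name)s: %(message)s"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

# DEBUG demo: [gemini_client] Preparing request {'model': 'gemini-2.0-flash', 'stream': True}
logger.debug("[gemini_client] Preparing request", extra={"model": "gemini-2.0-flash", "stream": True})
```

Without a formatter like this the extra fields ride along on the record but never print, which is why plain `logging.basicConfig()` shows only the message text.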
```diff
@@ -392,6 +402,15 @@ class GeminiClient(ProviderClient):
         except Exception as exc:
             duration_ms = (time.time() - start_time) * 1000
             error_code, error_message = _classify_gemini_error(exc)
+            logger.debug(
+                "[gemini_client] Exception details during init",
+                extra={
+                    "model": model_profile.model,
+                    "exception_type": type(exc).__name__,
+                    "exception_str": str(exc),
+                    "error_code": error_code,
+                },
+            )
             logger.error(
                 "[gemini_client] Initialization failed",
                 extra={
```
```diff
@@ -422,7 +441,12 @@ class GeminiClient(ProviderClient):
                 from google.genai import types as genai_types  # type: ignore
 
                 config["thinking_config"] = genai_types.ThinkingConfig(**thinking_config)
-            except (
+            except (
+                ImportError,
+                ModuleNotFoundError,
+                TypeError,
+                ValueError,
+            ):  # pragma: no cover - fallback when SDK not installed
                 config["thinking_config"] = thinking_config
         if declarations:
             config["tools"] = [{"function_declarations": declarations}]
```
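The widened exception tuple keeps the typed-config path strictly optional: if the `google-genai` SDK is not installed, or `ThinkingConfig` rejects the supplied keys or values, the raw dict is sent instead. (`ModuleNotFoundError` is a subclass of `ImportError`, so listing both is redundant but harmless.) A sketch of the pattern, with an illustrative function name:

```python
from typing import Any, Dict

def build_thinking_config(thinking_config: Dict[str, Any]) -> Any:
    """Prefer the SDK's typed config object; fall back to the plain dict."""
    try:
        from google.genai import types as genai_types  # optional dependency

        return genai_types.ThinkingConfig(**thinking_config)
    except (ImportError, ModuleNotFoundError, TypeError, ValueError):
        # SDK absent, or it rejected the keys/values: pass the dict through as-is.
        return thinking_config
```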
```diff
@@ -432,6 +456,23 @@ class GeminiClient(ProviderClient):
             "contents": contents,
             "config": config,
         }
+
+        logger.debug(
+            "[gemini_client] Request parameters",
+            extra={
+                "model": model_profile.model,
+                "config": json.dumps(
+                    {k: v for k, v in config.items() if k != "system_instruction"},
+                    ensure_ascii=False,
+                    default=str,
+                )[:1000],
+                "num_declarations": len(declarations),
+                "thinking_config": json.dumps(thinking_config, ensure_ascii=False)
+                if thinking_config
+                else None,
+            },
+        )
+
         usage_tokens: Dict[str, int] = {}
         collected_text: List[str] = []
         function_calls: List[Dict[str, Any]] = []
```
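Two details make this debug payload safe to emit: `default=str` lets `json.dumps` render SDK objects (like the `ThinkingConfig` above) instead of raising `TypeError`, and the `[:1000]` slice caps log volume; excluding `system_instruction` keeps the full prompt out of the logs. The capping technique in isolation:

```python
import json

# object() stands in for a non-JSON-serializable SDK value.
config = {"temperature": 0.7, "thinking_config": object(), "system_instruction": "..."}

preview = json.dumps(
    {k: v for k, v in config.items() if k != "system_instruction"},  # drop the prompt
    ensure_ascii=False,
    default=str,  # render non-serializable values via str() instead of raising
)[:1000]  # cap the logged payload at 1000 characters
print(preview)
```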
```diff
@@ -483,6 +524,10 @@ class GeminiClient(ProviderClient):
 
         try:
             if stream:
+                logger.debug(
+                    "[gemini_client] Initiating stream request",
+                    extra={"model": model_profile.model},
+                )
                 stream_resp = await _call_generate(streaming=True)
 
                 # Normalize streams into an async iterator to avoid StopIteration surfacing through
```
```diff
@@ -523,7 +568,8 @@ class GeminiClient(ProviderClient):
                         except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                             logger.warning(
                                 "[gemini_client] Stream callback failed: %s: %s",
-                                type(cb_exc).__name__,
+                                type(cb_exc).__name__,
+                                cb_exc,
                             )
                         if text_chunk:
                             collected_text.append(text_chunk)
```
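This hunk fixes a genuine bug rather than reformatting: the format string has two `%s` placeholders but the old call passed only one argument, so `logging`'s lazy `%`-formatting failed inside the handler and a `--- Logging error ---` traceback went to stderr instead of the intended warning. The same one-argument mistake is fixed in three more places in `openai.py` below. A standalone reproduction:

```python
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("demo")
cb_exc = ValueError("boom")

# Before the fix: one argument for two placeholders. logging swallows the
# TypeError and prints "--- Logging error ---" to stderr; no warning renders.
log.warning("Stream callback failed: %s: %s", type(cb_exc).__name__)

# After the fix: renders "Stream callback failed: ValueError: boom".
log.warning("Stream callback failed: %s: %s", type(cb_exc).__name__, cb_exc)
```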
```diff
@@ -552,6 +598,15 @@ class GeminiClient(ProviderClient):
         except Exception as exc:
             duration_ms = (time.time() - start_time) * 1000
             error_code, error_message = _classify_gemini_error(exc)
+            logger.debug(
+                "[gemini_client] Exception details",
+                extra={
+                    "model": model_profile.model,
+                    "exception_type": type(exc).__name__,
+                    "exception_str": str(exc),
+                    "error_code": error_code,
+                },
+            )
             logger.error(
                 "[gemini_client] API call failed",
                 extra={
```
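`_classify_gemini_error` maps an exception to the `(error_code, error_message)` pair reused by both the new debug record and the existing error record. Its rules are not part of this diff; a hypothetical classifier with the same shape, for orientation only:

```python
from typing import Tuple

def classify_error(exc: Exception) -> Tuple[str, str]:
    """Hypothetical stand-in for _classify_gemini_error: returns (code, message)."""
    text = str(exc).lower()
    if "429" in text or "rate limit" in text or "resource_exhausted" in text:
        return "rate_limited", str(exc)
    if "401" in text or "api key" in text or "permission" in text:
        return "auth_error", str(exc)
    if "timeout" in text or "deadline" in text:
        return "timeout", str(exc)
    return "unknown", str(exc)
```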
```diff
@@ -595,6 +650,16 @@ class GeminiClient(ProviderClient):
             **(usage_tokens or {}),
         )
 
+        logger.debug(
+            "[gemini_client] Response content blocks",
+            extra={
+                "model": model_profile.model,
+                "content_blocks": json.dumps(content_blocks, ensure_ascii=False)[:1000],
+                "usage_tokens": json.dumps(usage_tokens, ensure_ascii=False),
+                "metadata": json.dumps(response_metadata, ensure_ascii=False)[:500],
+            },
+        )
+
         logger.info(
             "[gemini_client] Response received",
             extra={
```
```diff
--- a/ripperdoc/core/providers/openai.py
+++ b/ripperdoc/core/providers/openai.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import asyncio
+import json
 import time
 from typing import Any, Dict, List, Optional, cast
 from uuid import uuid4
```
```diff
@@ -94,7 +95,7 @@ def _detect_openai_vendor(model_profile: ModelProfile) -> str:
     if "generativelanguage.googleapis.com" in base or name.startswith("gemini"):
         return "gemini_openai"
     if "gpt-5" in name:
-        return "
+        return "openai"
     return "openai"
 
 
```
```diff
@@ -130,7 +131,7 @@ def _build_thinking_kwargs(
         if effort:
             top_level["reasoning_effort"] = effort
             extra_body.setdefault("reasoning", {"effort": effort})
-    elif vendor == "
+    elif vendor == "openai":
         if effort:
             extra_body["reasoning"] = {"effort": effort}
         else:
```
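OpenAI-compatible backends disagree on where reasoning effort belongs: the branch above the `elif` sets both a top-level `reasoning_effort` kwarg and a `reasoning` object in `extra_body`, while the repaired `openai` branch uses `extra_body` alone. A simplified sketch of that routing; the function's real signature and the first branch's vendor name are not visible in this diff, so both are assumed:

```python
from typing import Any, Dict, Optional, Tuple

def build_thinking_kwargs(
    vendor: str, effort: Optional[str]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    top_level: Dict[str, Any] = {}
    extra_body: Dict[str, Any] = {}
    if vendor == "openrouter":  # assumed name for the branch preceding the hunk
        if effort:
            top_level["reasoning_effort"] = effort
            extra_body.setdefault("reasoning", {"effort": effort})
    elif vendor == "openai":
        if effort:
            extra_body["reasoning"] = {"effort": effort}
    return top_level, extra_body
```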
```diff
@@ -178,6 +179,15 @@ class OpenAIClient(ProviderClient):
         except Exception as exc:
             duration_ms = (time.time() - start_time) * 1000
             error_code, error_message = _classify_openai_error(exc)
+            logger.debug(
+                "[openai_client] Exception details",
+                extra={
+                    "model": model_profile.model,
+                    "exception_type": type(exc).__name__,
+                    "exception_str": str(exc),
+                    "error_code": error_code,
+                },
+            )
             logger.error(
                 "[openai_client] API call failed",
                 extra={
```
```diff
@@ -213,6 +223,18 @@ class OpenAIClient(ProviderClient):
         openai_messages: List[Dict[str, object]] = [
             {"role": "system", "content": system_prompt}
         ] + sanitize_tool_history(list(normalized_messages))
+
+        logger.debug(
+            "[openai_client] Preparing request",
+            extra={
+                "model": model_profile.model,
+                "tool_mode": tool_mode,
+                "stream": stream,
+                "max_thinking_tokens": max_thinking_tokens,
+                "num_tools": len(openai_tools),
+                "num_messages": len(openai_messages),
+            },
+        )
         collected_text: List[str] = []
         streamed_tool_calls: Dict[int, Dict[str, Optional[str]]] = {}
         streamed_tool_text: List[str] = []
```
```diff
@@ -228,6 +250,16 @@ class OpenAIClient(ProviderClient):
             model_profile, max_thinking_tokens
         )
 
+        logger.debug(
+            "[openai_client] Request parameters",
+            extra={
+                "model": model_profile.model,
+                "thinking_extra_body": json.dumps(thinking_extra_body, ensure_ascii=False),
+                "thinking_top_level": json.dumps(thinking_top_level, ensure_ascii=False),
+                "messages_preview": json.dumps(openai_messages[:2], ensure_ascii=False)[:500],
+            },
+        )
+
         async with AsyncOpenAI(
             api_key=model_profile.api_key, base_url=model_profile.api_base
         ) as client:
```
```diff
@@ -246,6 +278,16 @@ class OpenAIClient(ProviderClient):
                 }
                 if thinking_extra_body:
                     stream_kwargs["extra_body"] = thinking_extra_body
+                logger.debug(
+                    "[openai_client] Initiating stream request",
+                    extra={
+                        "model": model_profile.model,
+                        "stream_kwargs": json.dumps(
+                            {k: v for k, v in stream_kwargs.items() if k != "messages"},
+                            ensure_ascii=False,
+                        ),
+                    },
+                )
                 stream_coro = client.chat.completions.create(  # type: ignore[call-overload]
                     **stream_kwargs
                 )
```
```diff
@@ -258,9 +300,10 @@ class OpenAIClient(ProviderClient):
                     if getattr(chunk, "usage", None):
                         streamed_usage.update(openai_usage_tokens(chunk.usage))
 
-
+                    choices = getattr(chunk, "choices", None)
+                    if not choices or len(choices) == 0:
                         continue
-                    delta = getattr(
+                    delta = getattr(choices[0], "delta", None)
                     if not delta:
                         continue
 
```
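Streamed chat-completion chunks are not guaranteed to carry a choice: servers can send usage-only chunks (for example the final chunk when `stream_options: {"include_usage": true}` is set), and some proxies emit keep-alive chunks with an empty `choices` list, so unguarded `chunk.choices[0]` raises `IndexError` mid-stream. The guard as a standalone helper:

```python
from typing import Any, Optional

def delta_from_chunk(chunk: Any) -> Optional[Any]:
    """Return the first choice's delta, or None for usage-only/empty chunks."""
    choices = getattr(chunk, "choices", None)
    if not choices:  # covers a missing attribute, None, and an empty list
        return None
    return getattr(choices[0], "delta", None)
```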
```diff
@@ -303,7 +346,8 @@ class OpenAIClient(ProviderClient):
                         except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                             logger.warning(
                                 "[openai_client] Stream callback failed: %s: %s",
-                                type(cb_exc).__name__,
+                                type(cb_exc).__name__,
+                                cb_exc,
                             )
 
                     # Tool call deltas for native tool mode
```
```diff
@@ -333,7 +377,8 @@ class OpenAIClient(ProviderClient):
                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                                 logger.warning(
                                     "[openai_client] Stream callback failed: %s: %s",
-                                    type(cb_exc).__name__,
+                                    type(cb_exc).__name__,
+                                    cb_exc,
                                 )
 
                             if idx not in announced_tool_indexes and state.get("name"):
```
```diff
@@ -344,7 +389,8 @@ class OpenAIClient(ProviderClient):
                             except (RuntimeError, ValueError, TypeError, OSError) as cb_exc:
                                 logger.warning(
                                     "[openai_client] Stream callback failed: %s: %s",
-                                    type(cb_exc).__name__,
+                                    type(cb_exc).__name__,
+                                    cb_exc,
                                 )
 
                             streamed_tool_calls[idx] = state
```
```diff
@@ -441,23 +487,32 @@ class OpenAIClient(ProviderClient):
                 )
                 finish_reason = "stream"
             else:
-
-
-
-
-
-
-
-
-
-
-
-
-                )
-
-
-
-
+                response_choices = getattr(openai_response, "choices", None)
+                if not response_choices or len(response_choices) == 0:
+                    logger.warning(
+                        "[openai_client] Empty choices in response",
+                        extra={"model": model_profile.model},
+                    )
+                    content_blocks = [{"type": "text", "text": ""}]
+                    finish_reason = "error"
+                else:
+                    choice = response_choices[0]
+                    content_blocks = content_blocks_from_openai_choice(choice, tool_mode)
+                    finish_reason = cast(Optional[str], getattr(choice, "finish_reason", None))
+                    message_obj = getattr(choice, "message", None) or choice
+                    reasoning_content = getattr(message_obj, "reasoning_content", None)
+                    if reasoning_content:
+                        response_metadata["reasoning_content"] = reasoning_content
+                    reasoning_field = getattr(message_obj, "reasoning", None)
+                    if reasoning_field:
+                        response_metadata["reasoning"] = reasoning_field
+                        if "reasoning_content" not in response_metadata and isinstance(
+                            reasoning_field, str
+                        ):
+                            response_metadata["reasoning_content"] = reasoning_field
+                    reasoning_details = getattr(message_obj, "reasoning_details", None)
+                    if reasoning_details:
+                        response_metadata["reasoning_details"] = reasoning_details
 
             if can_stream:
                 if stream_reasoning_text:
```
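The rebuilt non-streaming path first guards against an empty `choices` list, then harvests reasoning output from three field names, since OpenAI-compatible servers expose it inconsistently: some use `reasoning_content` (DeepSeek-style APIs), others `reasoning` or `reasoning_details` (OpenRouter-style). A string-valued `reasoning` is mirrored into `reasoning_content` so downstream code can rely on one canonical key. The harvesting as a standalone sketch:

```python
from typing import Any, Dict

def collect_reasoning(message_obj: Any) -> Dict[str, Any]:
    """Mirror of the metadata harvesting in the hunk above."""
    meta: Dict[str, Any] = {}
    reasoning_content = getattr(message_obj, "reasoning_content", None)
    if reasoning_content:
        meta["reasoning_content"] = reasoning_content
    reasoning_field = getattr(message_obj, "reasoning", None)
    if reasoning_field:
        meta["reasoning"] = reasoning_field
        if "reasoning_content" not in meta and isinstance(reasoning_field, str):
            meta["reasoning_content"] = reasoning_field  # canonicalize
    reasoning_details = getattr(message_obj, "reasoning_details", None)
    if reasoning_details:
        meta["reasoning_details"] = reasoning_details
    return meta
```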
```diff
@@ -467,6 +522,16 @@ class OpenAIClient(ProviderClient):
             if stream_reasoning_details:
                 response_metadata["reasoning_details"] = stream_reasoning_details
 
+        logger.debug(
+            "[openai_client] Response content blocks",
+            extra={
+                "model": model_profile.model,
+                "content_blocks": json.dumps(content_blocks, ensure_ascii=False)[:1000],
+                "usage_tokens": json.dumps(usage_tokens, ensure_ascii=False),
+                "metadata": json.dumps(response_metadata, ensure_ascii=False)[:500],
+            },
+        )
+
         logger.info(
             "[openai_client] Response received",
             extra={
```