DeepFabric 4.4.1-py3-none-any.whl → 4.6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. deepfabric/__init__.py +8 -0
  2. deepfabric/auth.py +8 -2
  3. deepfabric/builders.py +2 -2
  4. deepfabric/builders_agent.py +18 -6
  5. deepfabric/cli.py +292 -13
  6. deepfabric/cloud_upload.py +884 -0
  7. deepfabric/config.py +47 -20
  8. deepfabric/config_manager.py +2 -2
  9. deepfabric/dataset.py +302 -0
  10. deepfabric/evaluation/backends/__init__.py +2 -0
  11. deepfabric/evaluation/backends/llm_eval_backend.py +527 -0
  12. deepfabric/evaluation/backends/ollama_backend.py +3 -3
  13. deepfabric/evaluation/backends/tool_call_parsers.py +7 -7
  14. deepfabric/evaluation/backends/transformers_backend.py +73 -16
  15. deepfabric/evaluation/evaluator.py +41 -7
  16. deepfabric/evaluation/evaluators/builtin/tool_calling.py +13 -8
  17. deepfabric/evaluation/inference.py +77 -5
  18. deepfabric/evaluation/metrics.py +4 -0
  19. deepfabric/evaluation/parser.py +8 -8
  20. deepfabric/evaluation/reporters/cloud_reporter.py +19 -6
  21. deepfabric/exceptions.py +14 -0
  22. deepfabric/generator.py +8 -4
  23. deepfabric/graph.py +38 -0
  24. deepfabric/hf_hub.py +1 -1
  25. deepfabric/loader.py +554 -0
  26. deepfabric/schemas.py +7 -7
  27. deepfabric/topic_manager.py +4 -0
  28. deepfabric/training/__init__.py +24 -5
  29. deepfabric/training/callback.py +43 -1
  30. deepfabric/training/dataset_utils.py +223 -0
  31. deepfabric/training/metrics_sender.py +50 -16
  32. deepfabric/tui.py +9 -1
  33. deepfabric/utils.py +14 -0
  34. deepfabric/validation.py +1 -1
  35. {deepfabric-4.4.1.dist-info → deepfabric-4.6.0.dist-info}/METADATA +84 -177
  36. {deepfabric-4.4.1.dist-info → deepfabric-4.6.0.dist-info}/RECORD +39 -34
  37. {deepfabric-4.4.1.dist-info → deepfabric-4.6.0.dist-info}/WHEEL +0 -0
  38. {deepfabric-4.4.1.dist-info → deepfabric-4.6.0.dist-info}/entry_points.txt +0 -0
  39. {deepfabric-4.4.1.dist-info → deepfabric-4.6.0.dist-info}/licenses/LICENSE +0 -0
deepfabric/evaluation/backends/llm_eval_backend.py (new file)
@@ -0,0 +1,527 @@
+"""LLM Evaluation Backend for cloud providers.
+
+Supports OpenAI, Anthropic, Gemini, and OpenRouter for tool calling evaluation.
+Uses async clients internally with sync wrapper for compatibility with Evaluator.
+"""
+
+import asyncio
+import json
+import logging
+import os
+
+from typing import Any
+
+from deepfabric.evaluation.inference import (
+    InferenceBackend,
+    InferenceConfig,
+    ModelResponse,
+)
+from deepfabric.llm.errors import handle_provider_error
+from deepfabric.llm.rate_limit_config import (
+    RateLimitConfig,
+    create_rate_limit_config,
+    get_default_rate_limit_config,
+)
+from deepfabric.llm.retry_handler import RetryHandler
+from deepfabric.schemas import ToolDefinition
+
+logger = logging.getLogger(__name__)
+
+
+class LLMEvalBackend(InferenceBackend):
+    """Inference backend using cloud LLM providers for evaluation.
+
+    Supports:
+    - OpenAI (GPT-4, GPT-4o, etc.)
+    - Anthropic (Claude models)
+    - Gemini (gemini-2.0-flash, etc.)
+    - OpenRouter (OpenAI-compatible API)
+
+    Uses async clients internally with sync wrapper for compatibility.
+    """
+
+    def __init__(self, config: InferenceConfig) -> None:
+        """Initialize LLM evaluation backend.
+
+        Args:
+            config: Inference configuration with provider and model details
+        """
+        super().__init__(config)
+
+        if config.provider is None:
+            msg = "provider must be specified for LLM backend"
+            raise ValueError(msg)
+
+        self.provider = config.provider
+        self.model_name = config.model
+
+        # Initialize rate limiting
+        self.rate_limit_config: RateLimitConfig
+        if config.rate_limit_config:
+            self.rate_limit_config = create_rate_limit_config(
+                self.provider, config.rate_limit_config
+            )
+        else:
+            self.rate_limit_config = get_default_rate_limit_config(self.provider)
+
+        self.retry_handler = RetryHandler(self.rate_limit_config, self.provider)
+
+        # Initialize provider-specific async client
+        self._client = self._create_client()
+
+    def _create_client(self) -> Any:
+        """Create the appropriate async client for the provider."""
+        client_creators = {
+            "openai": self._create_openai_client,
+            "anthropic": self._create_anthropic_client,
+            "gemini": self._create_gemini_client,
+            "openrouter": self._create_openrouter_client,
+        }
+        if creator := client_creators.get(self.provider):
+            return creator()
+        msg = f"Unsupported provider: {self.provider}"
+        raise ValueError(msg)
+
+    def _create_openai_client(self) -> Any:
+        """Create async OpenAI client."""
+        import openai  # noqa: PLC0415
+
+        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
+        if not api_key:
+            msg = "OPENAI_API_KEY environment variable is not set"
+            raise ValueError(msg)
+
+        kwargs: dict[str, Any] = {"api_key": api_key}
+        if self.config.base_url:
+            kwargs["base_url"] = self.config.base_url
+
+        return openai.AsyncOpenAI(**kwargs)
+
+    def _create_anthropic_client(self) -> Any:
+        """Create async Anthropic client."""
+        import anthropic  # noqa: PLC0415
+
+        api_key = self.config.api_key or os.getenv("ANTHROPIC_API_KEY")
+        if not api_key:
+            msg = "ANTHROPIC_API_KEY environment variable is not set"
+            raise ValueError(msg)
+
+        return anthropic.AsyncAnthropic(api_key=api_key)
+
+    def _create_gemini_client(self) -> Any:
+        """Create Gemini client (uses aio namespace for async)."""
+        from google import genai  # noqa: PLC0415
+
+        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
+        if not api_key:
+            msg = "GOOGLE_API_KEY or GEMINI_API_KEY environment variable is not set"
+            raise ValueError(msg)
+
+        return genai.Client(api_key=api_key)
+
+    def _create_openrouter_client(self) -> Any:
+        """Create async OpenRouter client (OpenAI-compatible)."""
+        import openai  # noqa: PLC0415
+
+        api_key = self.config.api_key or os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            msg = "OPENROUTER_API_KEY environment variable is not set"
+            raise ValueError(msg)
+
+        base_url = self.config.base_url or "https://openrouter.ai/api/v1"
+        return openai.AsyncOpenAI(api_key=api_key, base_url=base_url)
+
+    def generate(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Generate response with optional tool calling (sync wrapper).
+
+        Args:
+            messages: List of message dicts with 'role' and 'content'
+            tools: Optional list of available tools for function calling
+
+        Returns:
+            ModelResponse with generated content and parsed tool calls
+        """
+        return asyncio.run(self.generate_async(messages, tools))
+
+    async def generate_async(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Generate response with optional tool calling (async).
+
+        Args:
+            messages: List of message dicts with 'role' and 'content'
+            tools: Optional list of available tools for function calling
+
+        Returns:
+            ModelResponse with generated content and parsed tool calls
+        """
+        return await self._generate_with_retry(messages, tools)
+
+    async def _generate_with_retry(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Generate with retry logic."""
+        attempt = 0
+        max_retries = self.rate_limit_config.max_retries
+
+        while attempt <= max_retries:
+            try:
+                return await self._do_generate(messages, tools)
+            except Exception as e:
+                if not self.retry_handler.should_retry(e):
+                    raise handle_provider_error(e, self.provider, self.model_name) from e
+
+                if attempt >= max_retries:
+                    self.retry_handler.on_giveup_handler({"exception": e, "tries": attempt + 1})
+                    raise handle_provider_error(e, self.provider, self.model_name) from e
+
+                delay = self.retry_handler.calculate_delay(attempt, e)
+                self.retry_handler.on_backoff_handler(
+                    {
+                        "exception": e,
+                        "wait": delay,
+                        "tries": attempt + 1,
+                    }
+                )
+                await asyncio.sleep(delay)
+                attempt += 1
+
+        msg = "Unexpected state in retry logic"
+        raise RuntimeError(msg)
+
+    async def _do_generate(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Execute generation based on provider."""
+        generators = {
+            "openai": self._generate_openai_async,
+            "openrouter": self._generate_openai_async,
+            "anthropic": self._generate_anthropic_async,
+            "gemini": self._generate_gemini_async,
+        }
+        if generator := generators.get(self.provider):
+            return await generator(messages, tools)
+        msg = f"Unsupported provider: {self.provider}"
+        raise ValueError(msg)
+
+    async def _generate_openai_async(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Generate using OpenAI/OpenRouter API."""
+        kwargs: dict[str, Any] = {
+            "model": self.model_name,
+            "messages": messages,
+            "temperature": self.config.temperature,
+            "max_tokens": self.config.max_tokens,
+            "top_p": self.config.top_p,
+        }
+
+        if tools:
+            kwargs["tools"] = [tool.to_openai() for tool in tools]
+            kwargs["tool_choice"] = "auto"
+
+        response = await self._client.chat.completions.create(**kwargs)
+        message = response.choices[0].message
+
+        # Parse tool calls
+        tool_call = None
+        tool_calls = None
+        if message.tool_calls:
+            tool_calls = []
+            for tc in message.tool_calls:
+                # Parse arguments from JSON string
+                try:
+                    args = json.loads(tc.function.arguments)
+                except json.JSONDecodeError as e:
+                    logger.warning(
+                        "Failed to parse tool call arguments as JSON: %s (%s)",
+                        tc.function.arguments,
+                        e,
+                    )
+                    args = {}
+
+                parsed = {
+                    "name": tc.function.name,
+                    "arguments": args,
+                }
+                tool_calls.append(parsed)
+            tool_call = tool_calls[0] if tool_calls else None
+
+        return ModelResponse(
+            content=message.content or "",
+            tool_call=tool_call,
+            tool_calls=tool_calls,
+            raw_output=message.content or "",
+            finish_reason=response.choices[0].finish_reason,
+        )
+
+    async def _generate_anthropic_async(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Generate using Anthropic API."""
+        # Convert messages to Anthropic format (system message separate)
+        system_message = None
+        anthropic_messages = []
+        for msg in messages:
+            if msg["role"] == "system":
+                system_message = msg["content"]
+            else:
+                anthropic_messages.append(
+                    {
+                        "role": msg["role"],
+                        "content": msg["content"],
+                    }
+                )
+
+        # Anthropic doesn't allow both temperature and top_p together
+        # Use temperature only (the more commonly configured parameter)
+        kwargs: dict[str, Any] = {
+            "model": self.model_name,
+            "messages": anthropic_messages,
+            "max_tokens": self.config.max_tokens,
+            "temperature": self.config.temperature,
+        }
+
+        if system_message:
+            kwargs["system"] = system_message
+
+        if tools:
+            kwargs["tools"] = [self._convert_tool_to_anthropic(tool) for tool in tools]
+
+        response = await self._client.messages.create(**kwargs)
+
+        # Parse response - Anthropic uses content blocks
+        content = ""
+        tool_call = None
+        tool_calls: list[dict[str, Any]] = []
+
+        for block in response.content:
+            if block.type == "text":
+                content += block.text
+            elif block.type == "tool_use":
+                parsed = {
+                    "name": block.name,
+                    "arguments": block.input,
+                }
+                tool_calls.append(parsed)
+
+        tool_call = tool_calls[0] if tool_calls else None
+
+        return ModelResponse(
+            content=content,
+            tool_call=tool_call,
+            tool_calls=tool_calls if tool_calls else None,
+            raw_output=content,
+            finish_reason=response.stop_reason,
+        )
+
+    def _convert_tool_to_anthropic(self, tool: ToolDefinition) -> dict[str, Any]:
+        """Convert ToolDefinition to Anthropic tool format."""
+        openai_format = tool.to_openai()
+        func = openai_format["function"]
+
+        return {
+            "name": func["name"],
+            "description": func["description"],
+            "input_schema": func["parameters"],
+        }
+
+    async def _generate_gemini_async(
+        self,
+        messages: list[dict[str, str]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> ModelResponse:
+        """Generate using Gemini API."""
+        from google.genai import types  # noqa: PLC0415
+
+        # Convert messages to Gemini format
+        gemini_contents: list[types.Content] = []
+        system_instruction = None
+
+        for msg in messages:
+            role = msg["role"]
+            if role == "system":
+                # Gemini uses system_instruction parameter
+                system_instruction = msg["content"]
+            elif role == "assistant":
+                gemini_contents.append(
+                    types.Content(role="model", parts=[types.Part(text=msg["content"])])
+                )
+            else:
+                gemini_contents.append(
+                    types.Content(role="user", parts=[types.Part(text=msg["content"])])
+                )
+
+        # Prepare tools for Gemini
+        gemini_tools = None
+        if tools:
+            function_declarations = []
+            for tool in tools:
+                openai_format = tool.to_openai()
+                func = openai_format["function"]
+
+                # Gemini uses slightly different schema format
+                params = self._convert_schema_for_gemini(func["parameters"])
+
+                function_declarations.append(
+                    types.FunctionDeclaration(
+                        name=func["name"],
+                        description=func["description"],
+                        parameters=params,
+                    )
+                )
+
+            gemini_tools = [types.Tool(function_declarations=function_declarations)]
+
+        # Configure generation
+        generation_config = types.GenerateContentConfig(
+            temperature=self.config.temperature,
+            max_output_tokens=self.config.max_tokens,
+            top_p=self.config.top_p,
+            system_instruction=system_instruction,
+            tools=gemini_tools,
+        )
+
+        response = await self._client.aio.models.generate_content(
+            model=self.model_name,
+            contents=gemini_contents,
+            config=generation_config,
+        )
+
+        # Parse response
+        content = ""
+        tool_call = None
+        tool_calls: list[dict[str, Any]] = []
+
+        if response.candidates:
+            for part in response.candidates[0].content.parts:
+                if part.text:
+                    content += part.text
+                elif part.function_call:
+                    fc = part.function_call
+                    parsed = {
+                        "name": fc.name,
+                        "arguments": dict(fc.args) if fc.args else {},
+                    }
+                    tool_calls.append(parsed)
+
+        tool_call = tool_calls[0] if tool_calls else None
+        finish_reason = response.candidates[0].finish_reason.name if response.candidates else None
+
+        return ModelResponse(
+            content=content,
+            tool_call=tool_call,
+            tool_calls=tool_calls if tool_calls else None,
+            raw_output=content,
+            finish_reason=finish_reason,
+        )
+
+    def _convert_schema_for_gemini(self, schema: dict[str, Any]) -> dict[str, Any]:
+        """Convert JSON Schema to Gemini-compatible format.
+
+        Gemini has specific requirements:
+        - Does not support additionalProperties
+        - Requires 'items' field for array types
+        - Handles nested schemas in anyOf, oneOf, allOf
+        """
+        if not isinstance(schema, dict):
+            return schema
+
+        result = dict(schema)
+
+        # Remove additionalProperties (not supported by Gemini)
+        if "additionalProperties" in result:
+            del result["additionalProperties"]
+
+        # Ensure array types have items defined (Gemini requires this)
+        # Check both explicit type and type within anyOf/oneOf
+        is_array = result.get("type") == "array"
+        if not is_array and "type" in result and isinstance(result["type"], list):
+            is_array = "array" in result["type"]
+
+        if is_array and "items" not in result:
+            result["items"] = {"type": "string"}  # Default to string array
+
+        # Recursively process nested schemas in properties
+        if "properties" in result and isinstance(result["properties"], dict):
+            for prop_name, prop_schema in result["properties"].items():
+                if isinstance(prop_schema, dict):
+                    result["properties"][prop_name] = self._convert_schema_for_gemini(prop_schema)
+
+        # Process items in arrays
+        if "items" in result and isinstance(result["items"], dict):
+            result["items"] = self._convert_schema_for_gemini(result["items"])
+
+        # Process anyOf, oneOf, allOf schemas
+        for key in ("anyOf", "oneOf", "allOf"):
+            if key in result and isinstance(result[key], list):
+                result[key] = [
+                    self._convert_schema_for_gemini(sub_schema)
+                    for sub_schema in result[key]
+                    if isinstance(sub_schema, dict)
+                ]
+                # If any sub-schema is an array type, ensure it has items
+                for sub_schema in result[key]:
+                    if isinstance(sub_schema, dict):
+                        sub_is_array = sub_schema.get("type") == "array"
+                        if sub_is_array and "items" not in sub_schema:
+                            sub_schema["items"] = {"type": "string"}
+
+        # Process nested definitions/defs
+        for key in ("definitions", "$defs"):
+            if key in result and isinstance(result[key], dict):
+                result[key] = {
+                    name: self._convert_schema_for_gemini(def_schema)
+                    for name, def_schema in result[key].items()
+                    if isinstance(def_schema, dict)
+                }
+
+        return result
+
+    def generate_batch(
+        self,
+        batch_messages: list[list[dict[str, str]]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> list[ModelResponse]:
+        """Generate responses for a batch of message lists.
+
+        Uses asyncio.gather for parallel execution with rate limiting.
+
+        Args:
+            batch_messages: List of message lists
+            tools: Optional list of available tools
+
+        Returns:
+            List of ModelResponse objects
+        """
+        return asyncio.run(self._generate_batch_async(batch_messages, tools))
+
+    async def _generate_batch_async(
+        self,
+        batch_messages: list[list[dict[str, str]]],
+        tools: list[ToolDefinition] | None = None,
+    ) -> list[ModelResponse]:
+        """Generate batch responses using asyncio.gather."""
+        tasks = [self.generate_async(messages, tools) for messages in batch_messages]
+        return list(await asyncio.gather(*tasks))
+
+    def cleanup(self) -> None:
+        """Clean up resources.
+
+        Cloud clients don't typically need explicit cleanup.
+        """
+        pass
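
For orientation, a minimal usage sketch of the new backend follows. It assumes InferenceConfig accepts these fields as keyword arguments (the backend reads provider, model, api_key, base_url, temperature, max_tokens, top_p, and rate_limit_config from it); tool definitions are omitted because ToolDefinition's constructor is not shown in this diff.

    from deepfabric.evaluation.backends.llm_eval_backend import LLMEvalBackend
    from deepfabric.evaluation.inference import InferenceConfig

    # Hypothetical construction: keyword names mirror the attributes the
    # backend reads (config.provider, config.model, config.temperature, ...).
    config = InferenceConfig(
        provider="openai",   # one of: openai, anthropic, gemini, openrouter
        model="gpt-4o",
        temperature=0.0,
        max_tokens=512,
        top_p=1.0,
    )

    backend = LLMEvalBackend(config)  # raises ValueError if provider is missing

    # Sync wrapper around generate_async(); retries and rate limiting are
    # handled internally via RetryHandler.
    response = backend.generate(
        messages=[
            {"role": "system", "content": "You are a tool-calling assistant."},
            {"role": "user", "content": "What is the weather in Berlin?"},
        ],
    )
    print(response.content, response.tool_call)

generate_batch() wraps the same path with asyncio.gather, so a list of message lists is evaluated concurrently under the provider's rate-limit configuration.
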
deepfabric/evaluation/backends/ollama_backend.py
@@ -27,15 +27,15 @@ class OllamaBackend(InferenceBackend):
             config: Inference configuration
 
         Note:
-            - model_path should be the Ollama model name (e.g., "mistral", "llama2")
+            - model should be the Ollama model name (e.g., "mistral", "llama2")
             - Ollama server must be running (ollama serve)
             - Device setting is ignored (Ollama handles device automatically)
         """
         super().__init__(config)
 
-        # Use model_path directly as Ollama model name
+        # Use model directly as Ollama model name
         # Supports: "qwen3:8b", "hf.co/user/model:latest", etc.
-        self.model_name = config.model_path
+        self.model_name = config.model
 
         # Verify model is available
         try:
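
The model_path → model rename above also shows up on the caller side. A hedged sketch of the adjustment, assuming InferenceConfig can be constructed directly with keyword arguments matching its field names:

    from deepfabric.evaluation.inference import InferenceConfig

    # 4.4.1: the Ollama backend read config.model_path
    # 4.6.0: it reads config.model (see the hunk above)
    config = InferenceConfig(model="qwen3:8b")  # hypothetical minimal construction
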
deepfabric/evaluation/backends/tool_call_parsers.py
@@ -351,11 +351,11 @@ class ToolCallParserRegistry:
         logger.debug("No specific parser found, using generic fallback")
         return self._fallback()
 
-    def get_parser_for_model(self, model_path: str) -> ToolCallParser:
+    def get_parser_for_model(self, model: str) -> ToolCallParser:
         """Get parser by loading model config and detecting architecture.
 
         Args:
-            model_path: Path to model or HuggingFace Hub ID
+            model: Path to model or HuggingFace Hub ID
 
         Returns:
             Instantiated parser for the model
@@ -363,7 +363,7 @@ class ToolCallParserRegistry:
         from transformers import AutoConfig  # noqa: PLC0415
 
         try:
-            config = AutoConfig.from_pretrained(model_path)  # nosec
+            config = AutoConfig.from_pretrained(model)  # nosec
             architectures = getattr(config, "architectures", None)
             return self.get_parser(architectures)
         except Exception as e:
@@ -387,16 +387,16 @@ def get_parser(architectures: list[str] | None = None) -> ToolCallParser:
     return _registry.get_parser(architectures)
 
 
-def get_parser_for_model(model_path: str) -> ToolCallParser:
-    """Get a parser for a model by path.
+def get_parser_for_model(model: str) -> ToolCallParser:
+    """Get a parser for a model.
 
     Args:
-        model_path: Path to model or HuggingFace Hub ID
+        model: Model path or HuggingFace Hub ID
 
     Returns:
         Instantiated parser
     """
-    return _registry.get_parser_for_model(model_path)
+    return _registry.get_parser_for_model(model)
 
 
 def register_parser(architecture: str, parser_class: type[ToolCallParser]) -> None:
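
A small sketch of the renamed module-level helper, assuming the registry behaves as the docstrings above describe (architecture detection via the model's AutoConfig, with a generic fallback when nothing matches):

    from deepfabric.evaluation.backends.tool_call_parsers import (
        get_parser,
        get_parser_for_model,
    )

    # Resolve a parser from a local path or HuggingFace Hub ID (renamed parameter).
    parser = get_parser_for_model("org/model-name")  # hypothetical Hub ID

    # Or resolve directly from a known architecture list, skipping the config load.
    parser = get_parser(["Qwen2ForCausalLM"])  # hypothetical architecture name
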