hegelion-0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. hegelion/__init__.py +45 -0
  2. hegelion/core/__init__.py +29 -0
  3. hegelion/core/agent.py +166 -0
  4. hegelion/core/autocoding_state.py +293 -0
  5. hegelion/core/backends.py +442 -0
  6. hegelion/core/cache.py +92 -0
  7. hegelion/core/config.py +276 -0
  8. hegelion/core/core.py +649 -0
  9. hegelion/core/engine.py +865 -0
  10. hegelion/core/logging_utils.py +67 -0
  11. hegelion/core/models.py +293 -0
  12. hegelion/core/parsing.py +271 -0
  13. hegelion/core/personas.py +81 -0
  14. hegelion/core/prompt_autocoding.py +353 -0
  15. hegelion/core/prompt_dialectic.py +414 -0
  16. hegelion/core/prompts.py +127 -0
  17. hegelion/core/schema.py +67 -0
  18. hegelion/core/validation.py +68 -0
  19. hegelion/council.py +254 -0
  20. hegelion/examples_data/__init__.py +6 -0
  21. hegelion/examples_data/glm4_6_examples.jsonl +2 -0
  22. hegelion/judge.py +230 -0
  23. hegelion/mcp/__init__.py +3 -0
  24. hegelion/mcp/server.py +918 -0
  25. hegelion/scripts/hegelion_agent_cli.py +90 -0
  26. hegelion/scripts/hegelion_bench.py +117 -0
  27. hegelion/scripts/hegelion_cli.py +497 -0
  28. hegelion/scripts/hegelion_dataset.py +99 -0
  29. hegelion/scripts/hegelion_eval.py +137 -0
  30. hegelion/scripts/mcp_setup.py +150 -0
  31. hegelion/search_providers.py +151 -0
  32. hegelion/training/__init__.py +7 -0
  33. hegelion/training/datasets.py +123 -0
  34. hegelion/training/generator.py +232 -0
  35. hegelion/training/mlx_scu_trainer.py +379 -0
  36. hegelion/training/mlx_trainer.py +181 -0
  37. hegelion/training/unsloth_trainer.py +136 -0
  38. hegelion-0.4.0.dist-info/METADATA +295 -0
  39. hegelion-0.4.0.dist-info/RECORD +43 -0
  40. hegelion-0.4.0.dist-info/WHEEL +5 -0
  41. hegelion-0.4.0.dist-info/entry_points.txt +8 -0
  42. hegelion-0.4.0.dist-info/licenses/LICENSE +21 -0
  43. hegelion-0.4.0.dist-info/top_level.txt +1 -0
hegelion/core/core.py ADDED
@@ -0,0 +1,649 @@
+ """Core public API for Hegelion dialectical reasoning."""
+
+ from __future__ import annotations
+
+ import asyncio
+ import json
+ import os
+ from importlib.metadata import PackageNotFoundError, version as pkg_version
+ from os import PathLike
+ from pathlib import Path
+ from typing import Any, Callable, Iterable, List, Mapping, Optional, Union
+
+ from .cache import CacheConfig, ResultCache, compute_cache_key
+ from .backends import LLMBackend
+ from .config import (
+     get_backend_from_env,
+     get_engine_settings,
+     resolve_backend_for_model,
+ )
+ from .engine import HegelionEngine
+ from .models import HegelionResult
+ from .personas import Persona, get_personas
+ from .validation import validate_hegelion_result
+
+ try:
+     _PACKAGE_VERSION = pkg_version("hegelion")
+ except PackageNotFoundError:
+     _PACKAGE_VERSION = os.getenv("HEGELION_VERSION", "dev")
+
+ PromptEntry = Union[str, Mapping[str, Any]]
+ PromptSource = Union[Path, str, PathLike[str], Iterable[PromptEntry]]
+
+
+ def _extract_prompt_from_mapping(data: Mapping[str, Any]) -> Optional[str]:
+     for key in ("query", "prompt", "text"):
+         value = data.get(key)
+         if isinstance(value, str) and value.strip():
+             return value.strip()
+     return None
+
+
+ def _normalize_prompt_entry(entry: PromptEntry) -> str:
+     if isinstance(entry, str):
+         text = entry.strip()
+         if not text:
+             raise ValueError("Prompt entries cannot be empty strings.")
+         return text
+     if isinstance(entry, Mapping):
+         extracted = _extract_prompt_from_mapping(entry)
+         if extracted:
+             return extracted
+         raise ValueError("Prompt dicts must include a 'query' or 'prompt' field.")
+     raise TypeError(f"Unsupported prompt entry type: {type(entry)!r}")
+
+
+ def _coerce_prompts(prompts: Iterable[PromptEntry]) -> List[str]:
+     normalized: List[str] = []
+     for entry in prompts:
+         try:
+             normalized.append(_normalize_prompt_entry(entry))
+         except (TypeError, ValueError) as exc:  # pragma: no cover - defensive
+             raise ValueError(f"Invalid prompt entry {entry!r}: {exc}") from exc
+     return normalized
+
+
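For reference, a small sketch (not part of the package source) of the entry shapes these helpers accept, mirroring the key order checked in `_extract_prompt_from_mapping`:

entries = [
    "Plain strings are stripped and used as-is",
    {"query": "dict entries may carry the prompt under 'query'..."},
    {"prompt": "...or under 'prompt'..."},
    {"text": "...or under 'text'"},
]
# _coerce_prompts(entries) -> the four normalized strings.
# An empty string, or a mapping lacking all three keys, raises ValueError.
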
+ def _load_prompts_from_path(path: Path) -> List[str]:
+     if not path.exists() or not path.is_file():
+         raise FileNotFoundError(f"Prompts file not found: {path}")
+
+     prompts: List[str] = []
+     with path.open("r", encoding="utf-8") as handle:
+         for raw_line in handle:
+             line = raw_line.strip()
+             if not line:
+                 continue
+             try:
+                 parsed = json.loads(line)
+             except json.JSONDecodeError:
+                 prompts.append(line)
+                 continue
+
+             if isinstance(parsed, Mapping):
+                 extracted = _extract_prompt_from_mapping(parsed)
+                 if extracted:
+                     prompts.append(extracted)
+                 else:
+                     prompts.append(json.dumps(parsed, ensure_ascii=False))
+             elif isinstance(parsed, str):
+                 if parsed.strip():
+                     prompts.append(parsed.strip())
+             else:
+                 prompts.append(json.dumps(parsed, ensure_ascii=False))
+     return prompts
+
+
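To illustrate the fallback chain above (raw text, then JSON object, then JSON string, then re-serialized JSON), a hypothetical prompts file and what the loader would return:

from pathlib import Path

demo = Path("prompts.jsonl")  # hypothetical file
demo.write_text(
    "What is consciousness?\n"
    '{"query": "Is mathematics invented or discovered?"}\n'
    '"A bare JSON string works too"\n'
    '{"topic": "no prompt key, so the object is re-serialized"}\n',
    encoding="utf-8",
)
# _load_prompts_from_path(demo) -> [
#     "What is consciousness?",
#     "Is mathematics invented or discovered?",
#     "A bare JSON string works too",
#     '{"topic": "no prompt key, so the object is re-serialized"}',
# ]
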
+ async def run_dialectic(
+     query: str,
+     *,
+     debug: bool = False,
+     backend: Optional[LLMBackend] = None,
+     model: Optional[str] = None,
+     max_tokens_per_phase: Optional[int] = None,
+     use_cache: Optional[bool] = None,
+     cache_ttl_seconds: Optional[int] = None,
+     validate: Optional[bool] = None,
+     stream_callback: Optional[Callable[[str, str], Any]] = None,
+     progress_callback: Optional[Callable[[str, dict], Any]] = None,
+     personas: Optional[Union[List[Persona], str]] = None,
+     iterations: int = 1,
+     # Phase 2 enhancements
+     use_search: bool = False,
+     use_council: bool = False,
+     use_judge: bool = False,
+     min_judge_score: int = 5,
+     council_members: Optional[List[str]] = None,
+     max_iterations: int = 1,
+ ) -> HegelionResult:
+     """
+     Run a single dialectical reasoning query with optional Phase 2 enhancements.
+
+     Args:
+         query: The question or prompt to analyze dialectically.
+         debug: Whether to include debug information in output.
+         backend: Optional LLM backend (defaults to the env-configured backend).
+         model: Optional model name override.
+         max_tokens_per_phase: Optional override for maximum tokens per phase.
+         use_cache: Optional override for result caching (defaults to env settings).
+         cache_ttl_seconds: Optional override for cache entry TTL in seconds.
+         validate: Optional override for result validation (defaults to env settings).
+         stream_callback: Optional callback receiving streamed phase output.
+         progress_callback: Optional callback receiving phase progress events.
+         personas: Optional list of Persona objects OR preset name (e.g., "council", "security").
+         iterations: Number of refinement loops (Synthesis T1 -> Thesis T2). Defaults to 1.
+         use_search: Enable search-grounded antithesis (Phase 2).
+         use_council: Enable multi-perspective council critiques (Phase 2).
+         use_judge: Enable quality evaluation with Iron Judge (Phase 2).
+         min_judge_score: Minimum acceptable judge score (0-10).
+         council_members: Specific council members to use (default: all).
+         max_iterations: Maximum iterations for quality improvement.
+
+     Returns:
+         HegelionResult: Structured result with thesis, antithesis, synthesis, and analysis.
+
+     Example:
+         >>> import asyncio
+         >>> from hegelion import run_dialectic
+         >>>
+         >>> # Basic dialectic
+         >>> async def main():
+         ...     result = await run_dialectic("What year was the printing press invented?")
+         ...     print(result.synthesis)
+         >>>
+         >>> asyncio.run(main())
+     """
+     settings = get_engine_settings()
+     resolved_tokens = max_tokens_per_phase or settings.max_tokens_per_phase
+     resolved_validate = settings.validate_results if validate is None else validate
+     resolved_cache_enabled = settings.cache_enabled if use_cache is None else use_cache
+     resolved_cache_ttl = (
+         cache_ttl_seconds if cache_ttl_seconds is not None else settings.cache_ttl_seconds
+     )
+
+     # Resolve personas
+     resolved_personas: Optional[List[Persona]] = None
+     if isinstance(personas, str):
+         resolved_personas = get_personas(preset_name=personas)
+     elif isinstance(personas, list):
+         resolved_personas = personas
+
+     # Backwards-compatible resolution: explicit backend wins, then model,
+     # then the environment-configured default.
+     if backend is not None:
+         resolved_backend = backend
+         resolved_model = model or settings.model
+     elif model is not None:
+         resolved_backend = resolve_backend_for_model(model)
+         resolved_model = model
+     else:
+         resolved_backend = get_backend_from_env()
+         resolved_model = settings.model
+
+     engine = HegelionEngine(
+         backend=resolved_backend,
+         model=resolved_model,
+         synthesis_threshold=settings.synthesis_threshold,
+         max_tokens_per_phase=resolved_tokens,
+     )
+
+     cache: Optional[ResultCache] = None
+     cache_key: Optional[str] = None
+
+     # Update the cache key to include the new params
+     persona_key_part = ",".join(p.name for p in resolved_personas) if resolved_personas else "none"
+
+     if resolved_cache_enabled:
+         cache = ResultCache(
+             CacheConfig.from_env(cache_dir=settings.cache_dir, ttl_seconds=resolved_cache_ttl)
+         )
+         backend_name = resolved_backend.__class__.__name__
+
+         # Enhanced cache key
+         base_key = compute_cache_key(
+             query=query,
+             model=resolved_model,
+             backend_provider=backend_name,
+             version=_PACKAGE_VERSION,
+             max_tokens_per_phase=resolved_tokens,
+             debug=debug,
+         )
+         # Append the new features manually since compute_cache_key has a fixed signature.
+         cache_key = f"{base_key}_{persona_key_part}_{iterations}_{use_search}"
+
+         cached_payload = cache.load(cache_key)
+         if cached_payload:
+             return HegelionResult(**cached_payload)
+
+     # Resolve iterations (use the larger of the two inputs)
+     resolved_iterations = max(iterations, max_iterations)
+
+     # Check for Phase 2 features
+     if use_search or use_council or use_judge:
+         result = await _run_enhanced_dialectic(
+             engine=engine,
+             query=query,
+             debug=debug,
+             use_search=use_search,
+             use_council=use_council,
+             use_judge=use_judge,
+             min_judge_score=min_judge_score,
+             council_members=council_members,
+             max_iterations=resolved_iterations,
+             stream_callback=stream_callback,
+             progress_callback=progress_callback,
+         )
+     else:
+         # Standard Phase 1 processing
+         result = await engine.process_query(
+             query,
+             debug=debug,
+             max_iterations=resolved_iterations,
+             personas=resolved_personas,
+             use_search=use_search,
+             stream_callback=stream_callback,
+             progress_callback=progress_callback,
+         )
+
+     if resolved_validate:
+         validate_hegelion_result(result)
+
+     if cache and cache_key:
+         cache.save(cache_key, result)
+
+     return result
+
+
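By way of illustration, a hypothetical call exercising the Phase 2 path (the question and scores are made up; the flags are as defined in the signature above):

import asyncio
from hegelion import run_dialectic

async def demo():
    result = await run_dialectic(
        "Should cities ban private cars?",
        use_search=True,      # ground the antithesis in search results
        use_council=True,     # multi-perspective council critiques
        use_judge=True,       # Iron Judge quality gate
        min_judge_score=7,    # retry while the judge scores below 7...
        max_iterations=3,     # ...up to three attempts
    )
    print(result.synthesis)

asyncio.run(demo())
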
+ async def _run_enhanced_dialectic(
+     engine: HegelionEngine,
+     query: str,
+     debug: bool = False,
+     use_search: bool = False,
+     use_council: bool = False,
+     use_judge: bool = False,
+     min_judge_score: int = 5,
+     council_members: Optional[List[str]] = None,
+     max_iterations: int = 1,
+     stream_callback=None,
+     progress_callback=None,
+ ) -> HegelionResult:
+     """Run enhanced Phase 2 dialectical reasoning.
+
+     Args:
+         engine: Hegelion engine instance
+         query: Query to analyze
+         debug: Include debug information
+         use_search: Enable search grounding
+         use_council: Enable council critiques
+         use_judge: Enable quality judging
+         min_judge_score: Minimum judge score
+         council_members: Specific council members
+         max_iterations: Maximum quality iterations
+         stream_callback: Optional streaming callback
+         progress_callback: Optional progress callback
+
+     Returns:
+         Enhanced HegelionResult
+     """
+     # These modules live at the top of the package, not in hegelion.core,
+     # so they are imported relative to the parent package.
+     from ..search_providers import search_for_context
+     from ..council import DialecticalCouncil
+     from ..judge import judge_dialectic
+
+     for iteration in range(max_iterations):
+         try:
+             # Step 1: Standard thesis generation
+             if progress_callback:
+                 progress_callback("thesis", {"iteration": iteration + 1})
+
+             thesis_result = await engine.process_query(
+                 query,
+                 debug=debug,
+                 stream_callback=stream_callback,
+                 progress_callback=progress_callback,
+             )
+
+             # Step 2: Search grounding (if enabled)
+             search_context = []
+             if use_search:
+                 if progress_callback:
+                     progress_callback("search", {"query": query})
+                 search_context = await search_for_context(query, max_results=5)
+                 if debug and search_context:
+                     print(f"🔍 Found {len(search_context)} search results for grounding")
+
+             # Step 3: Enhanced antithesis
+             if use_council:
+                 if progress_callback:
+                     progress_callback("council", {"members": council_members or "all"})
+
+                 council = DialecticalCouncil(engine.backend)
+                 council_results = await council.generate_council_antithesis(
+                     query=query,
+                     thesis=thesis_result.thesis,
+                     search_context=search_context if search_context else None,
+                     selected_members=council_members,
+                 )
+                 enhanced_antithesis = council.synthesize_council_input(council_results)
+
+                 # Store council info for the trace
+                 thesis_result.metadata.council_perspectives = len(council_results)
+                 if debug and hasattr(thesis_result, "trace") and thesis_result.trace:
+                     thesis_result.trace.council_critiques = [
+                         f"{name}: {critique.member.expertise}"
+                         for name, critique in council_results.items()
+                     ]
+             else:
+                 # Enhanced antithesis with search context
+                 enhanced_antithesis = await _generate_search_enhanced_antithesis(
+                     engine.backend, query, thesis_result.thesis, search_context
+                 )
+
+             # Step 4: Update the result with the enhanced antithesis
+             thesis_result.antithesis = enhanced_antithesis
+
+             # Re-extract contradictions from the enhanced antithesis
+             from .parsing import extract_contradictions, extract_research_proposals
+
+             thesis_result.contradictions = extract_contradictions(enhanced_antithesis)
+
+             # Step 5: Enhanced synthesis with all context
+             synthesis_prompt = _build_enhanced_synthesis_prompt(
+                 query=query,
+                 thesis=thesis_result.thesis,
+                 antithesis=enhanced_antithesis,
+                 contradictions=thesis_result.contradictions,
+                 search_context=search_context,
+             )
+
+             enhanced_synthesis = await engine.backend.generate(synthesis_prompt)
+             thesis_result.synthesis = enhanced_synthesis
+             thesis_result.research_proposals = extract_research_proposals(enhanced_synthesis)
+
+             # Step 6: Judge quality (if enabled)
+             if use_judge:
+                 if progress_callback:
+                     progress_callback("judge", {"min_score": min_judge_score})
+
+                 try:
+                     judge_result = await judge_dialectic(
+                         backend=engine.backend,
+                         query=query,
+                         thesis=thesis_result.thesis,
+                         antithesis=enhanced_antithesis,
+                         synthesis=enhanced_synthesis,
+                         min_score=min_judge_score,
+                     )
+
+                     # Store judge info in metadata
+                     if (
+                         not hasattr(thesis_result.metadata, "debug")
+                         or not thesis_result.metadata.debug
+                     ):
+                         from .models import HegelionDebugInfo
+
+                         thesis_result.metadata.debug = HegelionDebugInfo()
+
+                     thesis_result.metadata.debug.judge_score = judge_result.score
+                     thesis_result.metadata.debug.judge_reasoning = judge_result.reasoning
+                     thesis_result.metadata.debug.critique_validity = judge_result.critique_validity
+
+                     if debug:
+                         print(f"⚖️ Judge Score: {judge_result.score}/10")
+                         print(f"✅ Critique Validity: {judge_result.critique_validity}")
+
+                     # Success! Return the result.
+                     thesis_result.mode = "enhanced_synthesis"
+                     return thesis_result
+
+                 except ValueError as e:
+                     # Quality below threshold; retry if iterations remain
+                     if iteration < max_iterations - 1:
+                         if debug:
+                             print(f"🔄 Iteration {iteration + 1}: {e}")
+                         continue
+                     else:
+                         # Last iteration: let it through but log a warning
+                         import logging
+
+                         logging.warning(f"Final iteration below quality threshold: {e}")
+                         thesis_result.mode = "enhanced_synthesis"
+                         return thesis_result
+             else:
+                 # No judging; return the enhanced result
+                 thesis_result.mode = "enhanced_synthesis"
+                 return thesis_result
+
+         except Exception as e:
+             if iteration < max_iterations - 1:
+                 if debug:
+                     print(f"🔄 Iteration {iteration + 1} failed, retrying: {e}")
+                 continue
+             else:
+                 raise RuntimeError(
+                     f"Enhanced dialectic failed after {max_iterations} iterations: {e}"
+                 ) from e
+
+     raise RuntimeError("Enhanced dialectic loop exited without returning a result")
+
+
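The progress events emitted above ("thesis", "search", "council", "judge") suggest a simple callback shape; a minimal sketch, assuming nothing beyond the ``Callable[[str, dict], Any]`` signature used in this module:

def print_progress(phase: str, info: dict) -> None:
    # phase is the step name, info a small dict of step details
    print(f"[{phase}] {info}")

# e.g. await run_dialectic(query, use_judge=True, progress_callback=print_progress)
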
+ async def _generate_search_enhanced_antithesis(
+     backend, query: str, thesis: str, search_context: List[str]
+ ) -> str:
+     """Generate antithesis with search context."""
+     from .prompts import ANTITHESIS_PROMPT
+
+     # Enhanced antithesis prompt with search context
+     context_section = ""
+     if search_context:
+         context_section = f"""
+
+ SEARCH CONTEXT (for fact-checking and grounding):
+ {chr(10).join(f"- {context}" for context in search_context)}
+
+ Use this context to ground your critique in real-world information."""
+
+     enhanced_prompt = ANTITHESIS_PROMPT.format(query=query, thesis=thesis) + context_section
+
+     return await backend.generate(enhanced_prompt)
+
+
+ def _build_enhanced_synthesis_prompt(
+     query: str, thesis: str, antithesis: str, contradictions: List[dict], search_context: List[str]
+ ) -> str:
+     """Build enhanced synthesis prompt with all context."""
+     from .prompts import SYNTHESIS_PROMPT
+
+     contradictions_str = "\n".join(f"- {c['description']}: {c['evidence']}" for c in contradictions)
+
+     base_prompt = SYNTHESIS_PROMPT.format(
+         query=query, thesis=thesis, antithesis=antithesis, contradictions=contradictions_str
+     )
+
+     if search_context:
+         base_prompt += f"""
+
+ ADDITIONAL CONTEXT FROM SEARCH:
+ {chr(10).join(f"- {context}" for context in search_context)}
+
+ Your synthesis should integrate insights from this real-world information."""
+
+     return base_prompt
+
+
+ async def run_benchmark(
+     prompts: PromptSource,
+     *,
+     output_file: Optional[Union[Path, str, PathLike[str]]] = None,
+     backend: Optional[LLMBackend] = None,
+     model: Optional[str] = None,
+     max_tokens_per_phase: Optional[int] = None,
+     debug: bool = False,
+     use_cache: Optional[bool] = None,
+     cache_ttl_seconds: Optional[int] = None,
+     validate: Optional[bool] = None,
+     stream_callback: Optional[Callable[[str, str], Any]] = None,
+     progress_callback: Optional[Callable[[str, dict], Any]] = None,
+     personas: Optional[Union[List[Persona], str]] = None,
+     iterations: int = 1,
+     use_search: bool = False,
+ ) -> List[HegelionResult]:
+     """
+     Run Hegelion on multiple prompts for benchmarking.
+
+     Args:
+         prompts: Iterable of prompt strings/objects or a path to a JSONL file.
+         output_file: Optional path that receives JSONL output (one result per line).
+         backend: Optional LLM backend override.
+         model: Optional model override.
+         max_tokens_per_phase: Optional override for phase token limits.
+         debug: Whether to include debug information in each result.
+
+     The remaining keyword arguments are forwarded unchanged to ``run_dialectic``
+     for each prompt.
+
+     Returns:
+         List[HegelionResult]: Results for all prompts.
+     """
+     path_like = (str, Path, PathLike)
+     if isinstance(prompts, path_like):
+         prompt_list = _load_prompts_from_path(Path(prompts))
+     else:
+         prompt_list = _coerce_prompts(prompts)
+
+     if not prompt_list:
+         return []
+
+     settings = get_engine_settings()
+     resolved_tokens = max_tokens_per_phase or settings.max_tokens_per_phase
+
+     if backend is not None:
+         resolved_backend = backend
+         resolved_model = model or settings.model
+     elif model is not None:
+         resolved_backend = resolve_backend_for_model(model)
+         resolved_model = model
+     else:
+         resolved_backend = get_backend_from_env()
+         resolved_model = settings.model
+
+     # Run all prompts with the same backend instance to maximize reuse and cache hits
+     results = []
+     for prompt in prompt_list:
+         result = await run_dialectic(
+             prompt,
+             debug=debug,
+             backend=resolved_backend,
+             model=resolved_model,
+             max_tokens_per_phase=resolved_tokens,
+             use_cache=use_cache,
+             cache_ttl_seconds=cache_ttl_seconds,
+             validate=validate,
+             stream_callback=stream_callback,
+             progress_callback=progress_callback,
+             personas=personas,
+             iterations=iterations,
+             use_search=use_search,
+         )
+         results.append(result)
+
+     # Write to the output file if specified
+     if output_file:
+         output_path = Path(output_file)
+         with output_path.open("w", encoding="utf-8") as handle:
+             for result in results:
+                 json.dump(result.to_dict(), handle, ensure_ascii=False)
+                 handle.write("\n")
+
+     return results
+
+
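A short usage sketch (hypothetical prompts and output path), assuming the top-level re-exports listed in ``__all__`` below:

import asyncio
from hegelion import run_benchmark

async def bench():
    results = await run_benchmark(
        ["What is entropy?", {"query": "Define emergence."}],
        output_file="results.jsonl",  # one result dict per line
    )
    print(f"{len(results)} prompts completed")

asyncio.run(bench())
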
+ def run_dialectic_sync(*args, **kwargs) -> HegelionResult:
+     """Synchronous wrapper for run_dialectic."""
+     return asyncio.run(run_dialectic(*args, **kwargs))
+
+
+ def run_benchmark_sync(*args, **kwargs) -> List[HegelionResult]:
+     """Synchronous wrapper for run_benchmark."""
+     return asyncio.run(run_benchmark(*args, **kwargs))
+
+
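One caveat: ``asyncio.run`` raises a RuntimeError when called from inside an already-running event loop (e.g. a Jupyter cell), so these wrappers are intended for plain synchronous scripts:

from hegelion import run_dialectic_sync

# Fine at the top level of a script; not inside an active event loop.
result = run_dialectic_sync("Is light a wave or a particle?")
print(result.synthesis)
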
+ async def dialectic(
+     query: str,
+     *,
+     model: Optional[str] = None,
+     backend: Optional[LLMBackend] = None,
+     max_tokens_per_phase: Optional[int] = None,
+     debug: bool = False,
+     use_cache: Optional[bool] = None,
+     cache_ttl_seconds: Optional[int] = None,
+     validate: Optional[bool] = None,
+     stream_callback: Optional[Callable[[str, str], Any]] = None,
+     progress_callback: Optional[Callable[[str, dict], Any]] = None,
+     personas: Optional[Union[List[Persona], str]] = None,
+     iterations: int = 1,
+     use_search: bool = False,
+ ) -> HegelionResult:
+     """Universal entrypoint for running a single dialectic query.
+
+     This is a higher-level convenience wrapper around ``run_dialectic`` that
+     adds provider auto-detection based on the ``model`` string.
+     """
+     return await run_dialectic(
+         query,
+         debug=debug,
+         backend=backend,
+         model=model,
+         max_tokens_per_phase=max_tokens_per_phase,
+         use_cache=use_cache,
+         cache_ttl_seconds=cache_ttl_seconds,
+         validate=validate,
+         stream_callback=stream_callback,
+         progress_callback=progress_callback,
+         personas=personas,
+         iterations=iterations,
+         use_search=use_search,
+     )
+
+
+ async def quickstart(
+     query: str,
+     model: Optional[str] = None,
+     debug: bool = False,
+     use_cache: Optional[bool] = None,
+     cache_ttl_seconds: Optional[int] = None,
+     validate: Optional[bool] = None,
+     stream_callback: Optional[Callable[[str, str], Any]] = None,
+     progress_callback: Optional[Callable[[str, dict], Any]] = None,
+     personas: Optional[Union[List[Persona], str]] = None,
+     iterations: int = 1,
+     use_search: bool = False,
+ ) -> HegelionResult:
+     """One-call helper for the common case.
+
+     - If ``model`` is provided, it will be used with automatic backend detection.
+     - Otherwise, the engine falls back to environment configuration.
+     """
+     return await dialectic(
+         query,
+         model=model,
+         debug=debug,
+         use_cache=use_cache,
+         cache_ttl_seconds=cache_ttl_seconds,
+         validate=validate,
+         stream_callback=stream_callback,
+         progress_callback=progress_callback,
+         personas=personas,
+         iterations=iterations,
+         use_search=use_search,
+     )
+
+
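For instance, a minimal sketch (the model string is illustrative; backend detection comes from ``resolve_backend_for_model``):

import asyncio
from hegelion import quickstart

async def main():
    # Passing a model string triggers backend auto-detection;
    # omit it to fall back to environment configuration.
    result = await quickstart("Why is the sky blue?", model="claude-3-5-sonnet")
    print(result.synthesis)

asyncio.run(main())
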
+ def dialectic_sync(*args, **kwargs) -> HegelionResult:
+     """Synchronous wrapper for dialectic."""
+     return asyncio.run(dialectic(*args, **kwargs))
+
+
+ def quickstart_sync(*args, **kwargs) -> HegelionResult:
+     """Synchronous wrapper for quickstart."""
+     return asyncio.run(quickstart(*args, **kwargs))
+
+
+ __all__ = [
+     "run_dialectic",
+     "run_benchmark",
+     "run_dialectic_sync",
+     "run_benchmark_sync",
+     "dialectic",
+     "quickstart",
+     "dialectic_sync",
+     "quickstart_sync",
+ ]