hegelion 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. hegelion/__init__.py +45 -0
  2. hegelion/core/__init__.py +29 -0
  3. hegelion/core/agent.py +166 -0
  4. hegelion/core/autocoding_state.py +293 -0
  5. hegelion/core/backends.py +442 -0
  6. hegelion/core/cache.py +92 -0
  7. hegelion/core/config.py +276 -0
  8. hegelion/core/core.py +649 -0
  9. hegelion/core/engine.py +865 -0
  10. hegelion/core/logging_utils.py +67 -0
  11. hegelion/core/models.py +293 -0
  12. hegelion/core/parsing.py +271 -0
  13. hegelion/core/personas.py +81 -0
  14. hegelion/core/prompt_autocoding.py +353 -0
  15. hegelion/core/prompt_dialectic.py +414 -0
  16. hegelion/core/prompts.py +127 -0
  17. hegelion/core/schema.py +67 -0
  18. hegelion/core/validation.py +68 -0
  19. hegelion/council.py +254 -0
  20. hegelion/examples_data/__init__.py +6 -0
  21. hegelion/examples_data/glm4_6_examples.jsonl +2 -0
  22. hegelion/judge.py +230 -0
  23. hegelion/mcp/__init__.py +3 -0
  24. hegelion/mcp/server.py +918 -0
  25. hegelion/scripts/hegelion_agent_cli.py +90 -0
  26. hegelion/scripts/hegelion_bench.py +117 -0
  27. hegelion/scripts/hegelion_cli.py +497 -0
  28. hegelion/scripts/hegelion_dataset.py +99 -0
  29. hegelion/scripts/hegelion_eval.py +137 -0
  30. hegelion/scripts/mcp_setup.py +150 -0
  31. hegelion/search_providers.py +151 -0
  32. hegelion/training/__init__.py +7 -0
  33. hegelion/training/datasets.py +123 -0
  34. hegelion/training/generator.py +232 -0
  35. hegelion/training/mlx_scu_trainer.py +379 -0
  36. hegelion/training/mlx_trainer.py +181 -0
  37. hegelion/training/unsloth_trainer.py +136 -0
  38. hegelion-0.4.0.dist-info/METADATA +295 -0
  39. hegelion-0.4.0.dist-info/RECORD +43 -0
  40. hegelion-0.4.0.dist-info/WHEEL +5 -0
  41. hegelion-0.4.0.dist-info/entry_points.txt +8 -0
  42. hegelion-0.4.0.dist-info/licenses/LICENSE +21 -0
  43. hegelion-0.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,497 @@
1
+ #!/usr/bin/env python
2
+ """CLI for Hegelion dialectical reasoning."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import argparse
7
+ import asyncio
8
+ import json
9
+ import sys
10
+ from pathlib import Path
11
+ from typing import List, Optional, Sequence
12
+
13
+ from importlib.resources import files
14
+
15
+ try:
16
+ from rich.console import Console
17
+ from rich.markdown import Markdown
18
+ from rich.panel import Panel
19
+ from rich.progress import Progress, SpinnerColumn, TextColumn
20
+ from rich.theme import Theme
21
+
22
+ custom_theme = Theme({"info": "dim cyan", "warning": "magenta", "danger": "bold red"})
23
+ console = Console(theme=custom_theme)
24
+ except ImportError:
25
+ console = None
26
+
27
+ if __package__ is None or __package__ == "": # pragma: no cover - direct execution fallback
28
+ sys.path.insert(0, str(Path(__file__).parent.parent))
29
+
30
+ from hegelion.core.config import ConfigurationError, get_config, set_config_value
31
+ from hegelion import run_dialectic
32
+ from hegelion import HegelionResult
33
+ from hegelion.scripts.mcp_setup import print_setup_instructions
34
+
35
+
36
def build_parser() -> argparse.ArgumentParser:
    """Create the argument parser for the single-query Hegelion CLI.

    Returns:
        A configured ``argparse.ArgumentParser``.
    """
    parser = argparse.ArgumentParser(
        description="Run Hegelion dialectical reasoning on a single query."
    )
    # Bind once so each option below reads as a single declaration.
    add = parser.add_argument
    add(
        "query",
        nargs="?",
        help="Question or topic to analyze dialectically (optional when using --interactive or --demo).",
    )
    add(
        "--debug",
        action="store_true",
        help="Include debug information and internal diagnostics",
    )
    add(
        "--output",
        type=Path,
        default=None,
        help="Optional path to write the structured result",
    )
    add(
        "--format",
        choices=["json", "summary", "rich"],
        default="json",
        help="Output format (default: json). Use 'rich' for formatted terminal output.",
    )
    add(
        "--demo",
        action="store_true",
        help="Show a cached example trace without calling any live backend",
    )
    add(
        "--interactive",
        action="store_true",
        help="Run in interactive mode for an exploratory session",
    )
    add(
        "--stream",
        action="store_true",
        help="Stream output in real-time as each phase is generated",
    )
    return parser
78
+
79
+
80
def parse_args(argv: Optional[Sequence[str]] = None) -> argparse.Namespace:
    """Parse *argv* (``sys.argv[1:]`` when ``None``) with the CLI parser."""
    parser = build_parser()
    return parser.parse_args(argv)
82
+
83
+
84
+ def _load_demo_examples() -> list[dict]:
85
+ """Load bundled demo examples from the installed package."""
86
+ try:
87
+ data = (
88
+ files("hegelion.examples_data")
89
+ .joinpath("glm4_6_examples.jsonl")
90
+ .read_text(encoding="utf-8")
91
+ )
92
+ return [json.loads(line) for line in data.splitlines() if line.strip()]
93
+ except Exception:
94
+ return []
95
+
96
+
97
def print_cached_example(format_type: str = "json") -> None:
    """Render the first bundled demo example in the requested format.

    Args:
        format_type: "summary", "rich" (when rich is available) or anything
            else, which falls back to pretty-printed JSON.
    """
    examples = _load_demo_examples()
    if not examples:
        print("Demo data is not available in this installation.", file=sys.stderr)
        return

    payload = examples[0]

    if format_type == "summary":
        print(format_summary(HegelionResult(**payload)))
    elif format_type == "rich" and console:
        print_rich_result(HegelionResult(**payload))
    else:
        # Default/JSON path; also covers "rich" when rich is not installed.
        print(json.dumps(payload, indent=2, ensure_ascii=False))
114
+
115
+
116
def format_summary(result) -> str:
    """Format a dialectic result as a human-readable plain-text summary.

    Args:
        result: Object exposing ``query``, ``mode``, ``thesis``,
            ``antithesis``, ``synthesis``, ``contradictions``,
            ``research_proposals`` and ``metadata``.

    Returns:
        A multi-line string with header, body sections, timing, and
        optional debug metrics.
    """
    metadata = result.metadata or {}

    def _fmt_time(key: str) -> str:
        # Millisecond timings; missing or non-numeric values render as "n/a".
        value = metadata.get(key)
        return f"{value:.0f}ms" if isinstance(value, (int, float)) else "n/a"

    out = [
        f"Query: {result.query}",
        f"Mode: {result.mode}",
        f"Contradictions Found: {len(result.contradictions)}",
        f"Research Proposals: {len(result.research_proposals)}",
    ]

    provider = metadata.get("backend_provider")
    model = metadata.get("backend_model")
    if provider or model:
        backend_parts = [
            str(part).strip()
            for part in (provider, model)
            if part is not None and str(part).strip()
        ]
        if backend_parts:
            out.append("Backend: " + ", ".join(backend_parts))

    # Main dialectic sections, each preceded by a blank separator line.
    for title, body in (
        ("=== THESIS ===", result.thesis),
        ("=== ANTITHESIS ===", result.antithesis),
        ("=== SYNTHESIS ===", result.synthesis),
    ):
        out += ["", title, body]
    out.append("")

    if result.contradictions:
        out.append("=== CONTRADICTIONS ===")
        for idx, contradiction in enumerate(result.contradictions, 1):
            out.append(f"{idx}. {contradiction.get('description', 'No description')}")
            evidence = contradiction.get("evidence")
            if evidence:
                out.append(f" Evidence: {evidence}")
        out.append("")

    if result.research_proposals:
        out.append("=== RESEARCH PROPOSALS ===")
        for idx, proposal in enumerate(result.research_proposals, 1):
            out.append(f"{idx}. {proposal.get('description', 'No description')}")
            prediction = proposal.get("testable_prediction")
            if prediction:
                out.append(f" Prediction: {prediction}")
        out.append("")

    out.append("=== TIMING ===")
    out.append(f"Thesis: {_fmt_time('thesis_time_ms')}")
    out.append(f"Antithesis: {_fmt_time('antithesis_time_ms')}")
    out.append(f"Synthesis: {_fmt_time('synthesis_time_ms')}")
    out.append(f"Total: {_fmt_time('total_time_ms')}")

    debug_info = metadata.get("debug")
    if isinstance(debug_info, dict) and debug_info:
        out += ["", "=== DEBUG METRICS ==="]
        out.extend(f"{key}: {value}" for key, value in debug_info.items())

    return "\n".join(out)
191
+
192
+
193
def print_rich_result(result: HegelionResult):
    """Render the result with Rich panels; fall back to the plain summary
    when Rich is not installed (``console`` is ``None``)."""
    if console is None:
        print(format_summary(result))
        return

    console.print(f"\n[bold]Query:[/bold] {result.query}")

    # One panel per dialectic phase, colour-coded.
    panel_specs = (
        (result.thesis, "[bold cyan]Thesis[/]", "cyan"),
        (result.antithesis, "[bold magenta]Antithesis[/]", "magenta"),
        (result.synthesis, "[bold green]Synthesis[/]", "green"),
    )
    for body, title, border in panel_specs:
        console.print(Panel(Markdown(body), title=title, border_style=border))

    if result.contradictions:
        console.print("\n[bold red]Contradictions Identified:[/]")
        for idx, item in enumerate(result.contradictions, 1):
            console.print(f"{idx}. {item.get('description', 'No description')}", style="red")

    if result.research_proposals:
        console.print("\n[bold blue]Research Proposals:[/]")
        for idx, item in enumerate(result.research_proposals, 1):
            console.print(f"{idx}. {item.get('description', 'No description')}", style="blue")
            if item.get("testable_prediction"):
                console.print(f" [dim]Prediction: {item['testable_prediction']}[/]")
222
+
223
+
224
def parse_command_string(line: str) -> tuple[str, list[str]]:
    """Split an interactive command line into ``(command, args)``.

    The command token is lowercased; an empty/blank line yields ``("", [])``.
    """
    tokens = line.split()
    if tokens:
        return tokens[0].lower(), tokens[1:]
    return "", []
230
+
231
+
232
async def async_input(prompt: str) -> str:
    """Read a line from stdin without blocking the event loop.

    Runs the built-in ``input`` in the default thread-pool executor so
    other tasks keep running while waiting for the user.

    Args:
        prompt: Text shown before reading.

    Returns:
        The line entered by the user (without the trailing newline).
    """
    # get_event_loop() is deprecated inside coroutines; get_running_loop()
    # is the supported way to obtain the current loop here.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, lambda: input(prompt))
236
+
237
+
238
async def interactive_session() -> None:
    """Run the Hegelion CLI as an interactive REPL.

    Reads lines from stdin; recognised commands (``help``, ``history``,
    ``show``, ``set``, ``exit``/``quit``) are handled locally, and any
    other input is treated as a new dialectical query that is streamed
    via ``run_dialectic``. Ctrl-C / EOF exits the loop cleanly.
    """
    if console:
        console.print("[bold green]Welcome to the Hegelion Interactive Dialectic Explorer.[/]")
        console.print("Type a query to start, or 'help' for a list of commands.")
    else:
        print("Welcome to the Hegelion Interactive Dialectic Explorer.")
        print("Type a query to start, or 'help' for a list of commands.")

    history: List[str] = []
    latest_result: Optional[HegelionResult] = None
    debug_mode = get_config().debug

    while True:
        try:
            query = (await async_input("Hegelion> ")).strip()
            if not query:
                continue

            history.append(query)
            # Consistency fix: reuse the module's parse_command_string
            # helper instead of re-implementing the tokenization inline.
            command, cmd_args = parse_command_string(query)

            if command in ("exit", "quit"):
                break
            elif command == "help":
                print_interactive_help()
            elif command == "history":
                for i, item in enumerate(history, 1):
                    print(f"{i}: {item}")
            elif command == "show":
                if not latest_result:
                    print("Run a query first.")
                    continue
                if not cmd_args:
                    print(
                        "Usage: show <thesis|antithesis|synthesis|contradictions|research|metadata|summary|rich>"
                    )
                    continue
                show_what = cmd_args[0].lower()
                if show_what == "thesis":
                    print(latest_result.thesis)
                elif show_what == "antithesis":
                    print(latest_result.antithesis)
                elif show_what == "synthesis":
                    print(latest_result.synthesis)
                elif show_what in ("contradictions", "cons"):
                    print(json.dumps(latest_result.contradictions, indent=2, ensure_ascii=False))
                elif show_what in ("research", "proposals"):
                    print(
                        json.dumps(
                            latest_result.research_proposals,
                            indent=2,
                            ensure_ascii=False,
                        )
                    )
                elif show_what == "metadata":
                    print(json.dumps(latest_result.metadata, indent=2, ensure_ascii=False))
                elif show_what == "summary":
                    print(format_summary(latest_result))
                elif show_what == "rich" and console:
                    print_rich_result(latest_result)
                else:
                    print(f"Unknown section: {show_what}")
            elif command == "set":
                if len(cmd_args) < 2:
                    print("Usage: set <model|provider|debug> <value>")
                    continue
                setting, value = cmd_args[0].lower(), " ".join(cmd_args[1:])
                if setting == "model":
                    set_config_value("model", value)
                    print(f"Model set to: {value}")
                elif setting == "provider":
                    set_config_value("provider", value)
                    print(f"Provider set to: {value}")
                elif setting == "debug":
                    debug_mode = value.lower() in ("true", "on", "1")
                    set_config_value("debug", debug_mode)
                    print(f"Debug mode set to: {debug_mode}")
                else:
                    print(f"Unknown setting: {setting}")
            else:
                # Treat as a new query - use streaming by default in interactive mode
                stream_cb, progress_cb = await create_stream_callbacks(console)
                latest_result = await run_dialectic(
                    query=query,
                    debug=debug_mode,
                    stream_callback=stream_cb,
                    progress_callback=progress_cb,
                )
                # Show a brief summary after streaming
                if console:
                    console.print(
                        f"\n[dim]Query completed in {latest_result.metadata.get('total_time_ms', 0):.0f}ms[/]"
                    )
                    console.print("Type 'show <section>' to examine parts of the result.")
                else:
                    print(
                        f"\nQuery completed in {latest_result.metadata.get('total_time_ms', 0):.0f}ms"
                    )
                    print("Type 'show <section>' to examine parts of the result.")

        except (EOFError, KeyboardInterrupt):
            print("\nExiting.")
            break
        except Exception as exc:
            print(f"An error occurred: {exc}", file=sys.stderr)
345
+
346
+
347
def print_interactive_help() -> None:
    """Print the help message for the interactive mode."""
    # One entry per line; printed in declaration order.
    for text in (
        "\nHegelion Interactive Commands:",
        " <query> - Run a new dialectical query.",
        " show <section> - Show a section of the last result.",
        " Sections: thesis, antithesis, synthesis, contradictions, research, metadata, summary, rich",
        " set <setting> <value> - Change a setting for the session.",
        " Settings: model, provider, debug (on/off)",
        " history - Show a history of queries from this session.",
        " help - Show this help message.",
        " exit, quit - Exit the interactive session.\n",
    ):
        print(text)
360
+
361
+
362
async def create_stream_callbacks(console):
    """Create callbacks for streaming output to terminal.

    Args:
        console: Optional rich ``Console``; plain ``print`` is used when
            it is ``None``.

    Returns:
        Tuple ``(on_stream, on_progress)`` of coroutine callbacks suitable
        for ``run_dialectic``'s ``stream_callback``/``progress_callback``.
    """
    phase_colors = {"thesis": "cyan", "antithesis": "magenta", "synthesis": "green"}

    async def on_progress(event: str, payload: dict):
        """Handle phase lifecycle events."""
        if event == "phase_started":
            phase = payload.get("phase", "unknown")
            # Handle council mode persona names ("phase:persona")
            if ":" in phase:
                phase_name, persona = phase.split(":", 1)
                display_phase = f"{phase_name.upper()}: {persona.upper()}"
            else:
                display_phase = phase.upper()

            # Print phase header in the phase's colour (white when unknown).
            color = phase_colors.get(phase.split(":")[0], "white")
            if console:
                console.print(f"\n[bold {color}]━━━ {display_phase} ━━━[/]")
            else:
                print(f"\n--- {display_phase} ---")
        elif event == "phase_completed":
            # Fix: default the phase like phase_started does; the original
            # crashed with AttributeError when "phase" was absent/None.
            phase = payload.get("phase") or "unknown"
            time_ms = payload.get("time_ms", 0)
            if console:
                console.print(f"\n[dim]({phase.split(':')[0]} completed in {time_ms:.0f}ms)[/]")
            else:
                print(f"({phase.split(':')[0]} completed in {time_ms:.0f}ms)")

    async def on_stream(phase: str, chunk: str):
        """Handle streaming text chunks: print without a trailing newline."""
        if console:
            console.print(chunk, end="", highlight=False)
        else:
            print(chunk, end="", flush=True)

    return on_stream, on_progress
403
+
404
+
405
async def _run(args: argparse.Namespace) -> None:
    """Async CLI driver: dispatch on parsed args, then print/save the result.

    Handled in order: interactive mode, the "setup-mcp" pseudo-query, demo
    mode, then a live single-query run (streaming, rich-spinner, or plain).
    Raises SystemExit when the query is missing or no backend is configured.
    """
    if args.interactive:
        await interactive_session()
        return

    # "setup-mcp" is accepted as a pseudo-query for discoverability.
    if args.query == "setup-mcp":
        print_setup_instructions()
        print("\nTip: use the dedicated 'hegelion-setup-mcp' command for streamlined setup.")
        return

    if args.demo:
        print_cached_example(format_type=args.format)
        return

    if not args.query:
        raise SystemExit("Error: QUERY is required unless --interactive or --demo is specified.")

    try:
        if args.stream:
            # Streaming mode - show real-time output
            stream_cb, progress_cb = await create_stream_callbacks(console)
            result = await run_dialectic(
                query=args.query,
                debug=args.debug,
                stream_callback=stream_cb,
                progress_callback=progress_cb,
            )
            # Show summary after streaming completes
            if console:
                console.print(
                    f"\n[dim]Total time: {result.metadata.get('total_time_ms', 0):.0f}ms[/]"
                )
            else:
                print(f"\nTotal time: {result.metadata.get('total_time_ms', 0):.0f}ms")
        elif console and args.format == "rich":
            # Non-streaming rich mode with spinner
            with Progress(
                SpinnerColumn(),
                TextColumn("[progress.description]{task.description}"),
                console=console,
                transient=True,
            ) as progress:
                progress.add_task(description="Synthesizing Truth...", total=None)
                result = await run_dialectic(query=args.query, debug=args.debug)
        else:
            # Non-streaming plain mode (also the fallback when rich is absent)
            result = await run_dialectic(query=args.query, debug=args.debug)

    except ConfigurationError as exc:
        message = str(exc)
        guidance = (
            "No LLM backend is configured.\n"
            "Run `hegelion --demo` to see a cached example without API keys,\n"
            "or set ANTHROPIC_API_KEY / OPENAI_API_KEY / GOOGLE_API_KEY and HEGELION_PROVIDER."
        )
        # Always print errors to stderr for proper Unix behavior
        print(f"Error: {message}", file=sys.stderr)
        print(guidance, file=sys.stderr)
        raise SystemExit(1)

    # Prepare file output data (always JSON for file writes)
    output_data = json.dumps(result.to_dict(), indent=2, ensure_ascii=False)

    # Display based on format
    if args.format == "summary":
        print(format_summary(result))
    elif args.format == "rich" and console:
        print_rich_result(result)
    else:
        print(output_data)

    # Write to file if specified: .jsonl appends one compact record per
    # line; any other suffix overwrites with the pretty-printed document.
    if args.output:
        if args.output.suffix == ".jsonl":
            with args.output.open("a", encoding="utf-8") as handle:
                json.dump(result.to_dict(), handle, ensure_ascii=False)
                handle.write("\n")
        else:
            args.output.write_text(output_data, encoding="utf-8")
        # Status goes to stderr so stdout stays machine-parseable.
        print(f"Result saved to {args.output}", file=sys.stderr)
485
+
486
+
487
def main(argv: Optional[Sequence[str]] = None) -> None:
    """CLI entry point: parse arguments and run the async driver.

    Unexpected exceptions are reported on stderr and mapped to exit code 1;
    SystemExit from the driver propagates unchanged.
    """
    namespace = parse_args(argv)
    try:
        asyncio.run(_run(namespace))
    except Exception as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
494
+
495
+
496
# Allow direct script execution in addition to the console-script entry point.
if __name__ == "__main__":  # pragma: no cover - CLI entrypoint
    main()
@@ -0,0 +1,99 @@
1
+ """CLI tool for converting Hegelion logs into training datasets (DPO/Instruction)."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import argparse
6
+ import json
7
+ import sys
8
+ from pathlib import Path
9
+ from typing import List, Optional, Sequence
10
+
11
+ from hegelion import HegelionResult
12
+ from hegelion.training.datasets import export_training_data
13
+
14
+
15
def build_parser() -> argparse.ArgumentParser:
    """Create the CLI parser for converting logs into training datasets."""
    parser = argparse.ArgumentParser(
        description="Convert Hegelion agent logs to training datasets."
    )
    # Both positionals are filesystem paths; declare them uniformly.
    positionals = (
        ("input_file", "Path to input JSONL log file (from --log-file)"),
        ("output_file", "Path to save the converted dataset (.jsonl or .json)"),
    )
    for name, help_text in positionals:
        parser.add_argument(name, type=Path, help=help_text)
    parser.add_argument(
        "--format",
        choices=["dpo", "instruction"],
        default="dpo",
        help="Output format: 'dpo' (preference pairs) or 'instruction' (Alpaca-style)",
    )
    parser.add_argument(
        "--rejected",
        choices=["thesis", "antithesis", "both"],
        default="thesis",
        help="Which part of the dialectic to treat as the rejected response (DPO only)",
    )
    return parser
38
+
39
+
40
def parse_args(argv: Optional[Sequence[str]] = None) -> argparse.Namespace:
    """Parse command-line arguments for the dataset converter."""
    parser = build_parser()
    return parser.parse_args(argv)
42
+
43
+
44
def load_results(input_file: Path) -> List[HegelionResult]:
    """Load HegelionResult records from a JSONL log file.

    Exits the process with status 1 when the file is missing or unreadable;
    individual malformed lines are skipped with a warning on stderr.
    """
    loaded: List[HegelionResult] = []
    if not input_file.exists():
        print(f"Error: Input file {input_file} not found.", file=sys.stderr)
        sys.exit(1)

    try:
        with open(input_file, "r", encoding="utf-8") as handle:
            for lineno, raw in enumerate(handle, 1):
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    record = json.loads(raw)
                    # Logs may wrap the result under a "result" key;
                    # accept both wrapped and direct dumps.
                    if "result" in record:
                        record = record["result"]
                    loaded.append(HegelionResult.from_dict(record))
                except Exception as e:
                    print(f"Warning: Skipping invalid JSON on line {lineno}: {e}", file=sys.stderr)
    except Exception as e:
        print(f"Error reading input file: {e}", file=sys.stderr)
        sys.exit(1)

    return loaded
70
+
71
+
72
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Entry point: load logged results and export a training dataset.

    Returns:
        Process exit code: 0 on success, 1 on any failure.
    """
    args = parse_args(argv)

    print(f"Loading results from {args.input_file}...")
    results = load_results(args.input_file)

    if not results:
        print("No valid results found in input file.", file=sys.stderr)
        return 1

    print(f"Found {len(results)} results. Converting to {args.format} format...")

    try:
        export_training_data(
            results,
            args.output_file,
            format=args.format,
            rejected_source=args.rejected,
        )
        print(f"Successfully created dataset at {args.output_file}")
    except Exception as e:
        print(f"Error exporting dataset: {e}", file=sys.stderr)
        return 1
    return 0
96
+
97
+
98
# Script execution entry point; the process exit code comes from main().
if __name__ == "__main__":
    raise SystemExit(main())