crca-1.4.0-py3-none-any.whl → crca-1.5.0-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- CRCA.py +172 -7
- MODEL_CARD.md +53 -0
- PKG-INFO +8 -2
- RELEASE_NOTES.md +17 -0
- STABILITY.md +19 -0
- architecture/hybrid/consistency_engine.py +362 -0
- architecture/hybrid/conversation_manager.py +421 -0
- architecture/hybrid/explanation_generator.py +452 -0
- architecture/hybrid/few_shot_learner.py +533 -0
- architecture/hybrid/graph_compressor.py +286 -0
- architecture/hybrid/hybrid_agent.py +4398 -0
- architecture/hybrid/language_compiler.py +623 -0
- architecture/hybrid/main,py +0 -0
- architecture/hybrid/reasoning_tracker.py +322 -0
- architecture/hybrid/self_verifier.py +524 -0
- architecture/hybrid/task_decomposer.py +567 -0
- architecture/hybrid/text_corrector.py +341 -0
- benchmark_results/crca_core_benchmarks.json +178 -0
- branches/crca_sd/crca_sd_realtime.py +6 -2
- branches/general_agent/__init__.py +102 -0
- branches/general_agent/general_agent.py +1400 -0
- branches/general_agent/personality.py +169 -0
- branches/general_agent/utils/__init__.py +19 -0
- branches/general_agent/utils/prompt_builder.py +170 -0
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/METADATA +8 -2
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/RECORD +303 -20
- crca_core/__init__.py +35 -0
- crca_core/benchmarks/__init__.py +14 -0
- crca_core/benchmarks/synthetic_scm.py +103 -0
- crca_core/core/__init__.py +23 -0
- crca_core/core/api.py +120 -0
- crca_core/core/estimate.py +208 -0
- crca_core/core/godclass.py +72 -0
- crca_core/core/intervention_design.py +174 -0
- crca_core/core/lifecycle.py +48 -0
- crca_core/discovery/__init__.py +9 -0
- crca_core/discovery/tabular.py +193 -0
- crca_core/identify/__init__.py +171 -0
- crca_core/identify/backdoor.py +39 -0
- crca_core/identify/frontdoor.py +48 -0
- crca_core/identify/graph.py +106 -0
- crca_core/identify/id_algorithm.py +43 -0
- crca_core/identify/iv.py +48 -0
- crca_core/models/__init__.py +67 -0
- crca_core/models/provenance.py +56 -0
- crca_core/models/refusal.py +39 -0
- crca_core/models/result.py +83 -0
- crca_core/models/spec.py +151 -0
- crca_core/models/validation.py +68 -0
- crca_core/scm/__init__.py +9 -0
- crca_core/scm/linear_gaussian.py +198 -0
- crca_core/timeseries/__init__.py +6 -0
- crca_core/timeseries/pcmci.py +181 -0
- crca_llm/__init__.py +12 -0
- crca_llm/client.py +85 -0
- crca_llm/coauthor.py +118 -0
- crca_llm/orchestrator.py +289 -0
- crca_llm/types.py +21 -0
- crca_reasoning/__init__.py +16 -0
- crca_reasoning/critique.py +54 -0
- crca_reasoning/godclass.py +206 -0
- crca_reasoning/memory.py +24 -0
- crca_reasoning/rationale.py +10 -0
- crca_reasoning/react_controller.py +81 -0
- crca_reasoning/tool_router.py +97 -0
- crca_reasoning/types.py +40 -0
- crca_sd/__init__.py +15 -0
- crca_sd/crca_sd_core.py +2 -0
- crca_sd/crca_sd_governance.py +2 -0
- crca_sd/crca_sd_mpc.py +2 -0
- crca_sd/crca_sd_realtime.py +2 -0
- crca_sd/crca_sd_tui.py +2 -0
- cuda-keyring_1.1-1_all.deb +0 -0
- cuda-keyring_1.1-1_all.deb.1 +0 -0
- docs/IMAGE_ANNOTATION_USAGE.md +539 -0
- docs/INSTALL_DEEPSPEED.md +125 -0
- docs/api/branches/crca-cg.md +19 -0
- docs/api/branches/crca-q.md +27 -0
- docs/api/branches/crca-sd.md +37 -0
- docs/api/branches/general-agent.md +24 -0
- docs/api/branches/overview.md +19 -0
- docs/api/crca/agent-methods.md +62 -0
- docs/api/crca/operations.md +79 -0
- docs/api/crca/overview.md +32 -0
- docs/api/image-annotation/engine.md +52 -0
- docs/api/image-annotation/overview.md +17 -0
- docs/api/schemas/annotation.md +34 -0
- docs/api/schemas/core-schemas.md +82 -0
- docs/api/schemas/overview.md +32 -0
- docs/api/schemas/policy.md +30 -0
- docs/api/utils/conversation.md +22 -0
- docs/api/utils/graph-reasoner.md +32 -0
- docs/api/utils/overview.md +21 -0
- docs/api/utils/router.md +19 -0
- docs/api/utils/utilities.md +97 -0
- docs/architecture/causal-graphs.md +41 -0
- docs/architecture/data-flow.md +29 -0
- docs/architecture/design-principles.md +33 -0
- docs/architecture/hybrid-agent/components.md +38 -0
- docs/architecture/hybrid-agent/consistency.md +26 -0
- docs/architecture/hybrid-agent/overview.md +44 -0
- docs/architecture/hybrid-agent/reasoning.md +22 -0
- docs/architecture/llm-integration.md +26 -0
- docs/architecture/modular-structure.md +37 -0
- docs/architecture/overview.md +69 -0
- docs/architecture/policy-engine-arch.md +29 -0
- docs/branches/crca-cg/corposwarm.md +39 -0
- docs/branches/crca-cg/esg-scoring.md +30 -0
- docs/branches/crca-cg/multi-agent.md +35 -0
- docs/branches/crca-cg/overview.md +40 -0
- docs/branches/crca-q/alternative-data.md +55 -0
- docs/branches/crca-q/architecture.md +71 -0
- docs/branches/crca-q/backtesting.md +45 -0
- docs/branches/crca-q/causal-engine.md +33 -0
- docs/branches/crca-q/execution.md +39 -0
- docs/branches/crca-q/market-data.md +60 -0
- docs/branches/crca-q/overview.md +58 -0
- docs/branches/crca-q/philosophy.md +60 -0
- docs/branches/crca-q/portfolio-optimization.md +66 -0
- docs/branches/crca-q/risk-management.md +102 -0
- docs/branches/crca-q/setup.md +65 -0
- docs/branches/crca-q/signal-generation.md +61 -0
- docs/branches/crca-q/signal-validation.md +43 -0
- docs/branches/crca-sd/core.md +84 -0
- docs/branches/crca-sd/governance.md +53 -0
- docs/branches/crca-sd/mpc-solver.md +65 -0
- docs/branches/crca-sd/overview.md +59 -0
- docs/branches/crca-sd/realtime.md +28 -0
- docs/branches/crca-sd/tui.md +20 -0
- docs/branches/general-agent/overview.md +37 -0
- docs/branches/general-agent/personality.md +36 -0
- docs/branches/general-agent/prompt-builder.md +30 -0
- docs/changelog/index.md +79 -0
- docs/contributing/code-style.md +69 -0
- docs/contributing/documentation.md +43 -0
- docs/contributing/overview.md +29 -0
- docs/contributing/testing.md +29 -0
- docs/core/crcagent/async-operations.md +65 -0
- docs/core/crcagent/automatic-extraction.md +107 -0
- docs/core/crcagent/batch-prediction.md +80 -0
- docs/core/crcagent/bayesian-inference.md +60 -0
- docs/core/crcagent/causal-graph.md +92 -0
- docs/core/crcagent/counterfactuals.md +96 -0
- docs/core/crcagent/deterministic-simulation.md +78 -0
- docs/core/crcagent/dual-mode-operation.md +82 -0
- docs/core/crcagent/initialization.md +88 -0
- docs/core/crcagent/optimization.md +65 -0
- docs/core/crcagent/overview.md +63 -0
- docs/core/crcagent/time-series.md +57 -0
- docs/core/schemas/annotation.md +30 -0
- docs/core/schemas/core-schemas.md +82 -0
- docs/core/schemas/overview.md +30 -0
- docs/core/schemas/policy.md +41 -0
- docs/core/templates/base-agent.md +31 -0
- docs/core/templates/feature-mixins.md +31 -0
- docs/core/templates/overview.md +29 -0
- docs/core/templates/templates-guide.md +75 -0
- docs/core/tools/mcp-client.md +34 -0
- docs/core/tools/overview.md +24 -0
- docs/core/utils/conversation.md +27 -0
- docs/core/utils/graph-reasoner.md +29 -0
- docs/core/utils/overview.md +27 -0
- docs/core/utils/router.md +27 -0
- docs/core/utils/utilities.md +97 -0
- docs/css/custom.css +84 -0
- docs/examples/basic-usage.md +57 -0
- docs/examples/general-agent/general-agent-examples.md +50 -0
- docs/examples/hybrid-agent/hybrid-agent-examples.md +56 -0
- docs/examples/image-annotation/image-annotation-examples.md +54 -0
- docs/examples/integration/integration-examples.md +58 -0
- docs/examples/overview.md +37 -0
- docs/examples/trading/trading-examples.md +46 -0
- docs/features/causal-reasoning/advanced-topics.md +101 -0
- docs/features/causal-reasoning/counterfactuals.md +43 -0
- docs/features/causal-reasoning/do-calculus.md +50 -0
- docs/features/causal-reasoning/overview.md +47 -0
- docs/features/causal-reasoning/structural-models.md +52 -0
- docs/features/hybrid-agent/advanced-components.md +55 -0
- docs/features/hybrid-agent/core-components.md +64 -0
- docs/features/hybrid-agent/overview.md +34 -0
- docs/features/image-annotation/engine.md +82 -0
- docs/features/image-annotation/features.md +113 -0
- docs/features/image-annotation/integration.md +75 -0
- docs/features/image-annotation/overview.md +53 -0
- docs/features/image-annotation/quickstart.md +73 -0
- docs/features/policy-engine/doctrine-ledger.md +105 -0
- docs/features/policy-engine/monitoring.md +44 -0
- docs/features/policy-engine/mpc-control.md +89 -0
- docs/features/policy-engine/overview.md +46 -0
- docs/getting-started/configuration.md +225 -0
- docs/getting-started/first-agent.md +164 -0
- docs/getting-started/installation.md +144 -0
- docs/getting-started/quickstart.md +137 -0
- docs/index.md +118 -0
- docs/js/mathjax.js +13 -0
- docs/lrm/discovery_proof_notes.md +25 -0
- docs/lrm/finetune_full.md +83 -0
- docs/lrm/math_appendix.md +120 -0
- docs/lrm/overview.md +32 -0
- docs/mkdocs.yml +238 -0
- docs/stylesheets/extra.css +21 -0
- docs_generated/crca_core/CounterfactualResult.md +12 -0
- docs_generated/crca_core/DiscoveryHypothesisResult.md +13 -0
- docs_generated/crca_core/DraftSpec.md +13 -0
- docs_generated/crca_core/EstimateResult.md +13 -0
- docs_generated/crca_core/IdentificationResult.md +17 -0
- docs_generated/crca_core/InterventionDesignResult.md +12 -0
- docs_generated/crca_core/LockedSpec.md +15 -0
- docs_generated/crca_core/RefusalResult.md +12 -0
- docs_generated/crca_core/ValidationReport.md +9 -0
- docs_generated/crca_core/index.md +13 -0
- examples/general_agent_example.py +277 -0
- examples/general_agent_quickstart.py +202 -0
- examples/general_agent_simple.py +92 -0
- examples/hybrid_agent_auto_extraction.py +84 -0
- examples/hybrid_agent_dictionary_demo.py +104 -0
- examples/hybrid_agent_enhanced.py +179 -0
- examples/hybrid_agent_general_knowledge.py +107 -0
- examples/image_annotation_quickstart.py +328 -0
- examples/test_hybrid_fixes.py +77 -0
- image_annotation/__init__.py +27 -0
- image_annotation/annotation_engine.py +2593 -0
- install_cuda_wsl2.sh +59 -0
- install_deepspeed.sh +56 -0
- install_deepspeed_simple.sh +87 -0
- mkdocs.yml +252 -0
- ollama/Modelfile +8 -0
- prompts/__init__.py +2 -1
- prompts/default_crca.py +9 -1
- prompts/general_agent.py +227 -0
- prompts/image_annotation.py +56 -0
- pyproject.toml +17 -2
- requirements-docs.txt +10 -0
- requirements.txt +21 -2
- schemas/__init__.py +26 -1
- schemas/annotation.py +222 -0
- schemas/conversation.py +193 -0
- schemas/hybrid.py +211 -0
- schemas/reasoning.py +276 -0
- schemas_export/crca_core/CounterfactualResult.schema.json +108 -0
- schemas_export/crca_core/DiscoveryHypothesisResult.schema.json +113 -0
- schemas_export/crca_core/DraftSpec.schema.json +635 -0
- schemas_export/crca_core/EstimateResult.schema.json +113 -0
- schemas_export/crca_core/IdentificationResult.schema.json +145 -0
- schemas_export/crca_core/InterventionDesignResult.schema.json +111 -0
- schemas_export/crca_core/LockedSpec.schema.json +646 -0
- schemas_export/crca_core/RefusalResult.schema.json +90 -0
- schemas_export/crca_core/ValidationReport.schema.json +62 -0
- scripts/build_lrm_dataset.py +80 -0
- scripts/export_crca_core_schemas.py +54 -0
- scripts/export_hf_lrm.py +37 -0
- scripts/export_ollama_gguf.py +45 -0
- scripts/generate_changelog.py +157 -0
- scripts/generate_crca_core_docs_from_schemas.py +86 -0
- scripts/run_crca_core_benchmarks.py +163 -0
- scripts/run_full_finetune.py +198 -0
- scripts/run_lrm_eval.py +31 -0
- templates/graph_management.py +29 -0
- tests/conftest.py +9 -0
- tests/test_core.py +2 -3
- tests/test_crca_core_discovery_tabular.py +15 -0
- tests/test_crca_core_estimate_dowhy.py +36 -0
- tests/test_crca_core_identify.py +18 -0
- tests/test_crca_core_intervention_design.py +36 -0
- tests/test_crca_core_linear_gaussian_scm.py +69 -0
- tests/test_crca_core_spec.py +25 -0
- tests/test_crca_core_timeseries_pcmci.py +15 -0
- tests/test_crca_llm_coauthor.py +12 -0
- tests/test_crca_llm_orchestrator.py +80 -0
- tests/test_hybrid_agent_llm_enhanced.py +556 -0
- tests/test_image_annotation_demo.py +376 -0
- tests/test_image_annotation_operational.py +408 -0
- tests/test_image_annotation_unit.py +551 -0
- tests/test_training_moe.py +13 -0
- training/__init__.py +42 -0
- training/datasets.py +140 -0
- training/deepspeed_zero2_0_5b.json +22 -0
- training/deepspeed_zero2_1_5b.json +22 -0
- training/deepspeed_zero3_0_5b.json +28 -0
- training/deepspeed_zero3_14b.json +28 -0
- training/deepspeed_zero3_h100_3gpu.json +20 -0
- training/deepspeed_zero3_offload.json +28 -0
- training/eval.py +92 -0
- training/finetune.py +516 -0
- training/public_datasets.py +89 -0
- training_data/react_train.jsonl +7473 -0
- utils/agent_discovery.py +311 -0
- utils/batch_processor.py +317 -0
- utils/conversation.py +78 -0
- utils/edit_distance.py +118 -0
- utils/formatter.py +33 -0
- utils/graph_reasoner.py +530 -0
- utils/rate_limiter.py +283 -0
- utils/router.py +2 -2
- utils/tool_discovery.py +307 -0
- webui/__init__.py +10 -0
- webui/app.py +229 -0
- webui/config.py +104 -0
- webui/static/css/style.css +332 -0
- webui/static/js/main.js +284 -0
- webui/templates/index.html +42 -0
- tests/test_crca_excel.py +0 -166
- tests/test_data_broker.py +0 -424
- tests/test_palantir.py +0 -349
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/WHEEL +0 -0
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/licenses/LICENSE +0 -0
tests/test_image_annotation_unit.py
ADDED
@@ -0,0 +1,551 @@
+"""
+Unit tests for image annotation engine - testing every feature individually.
+"""
+
+import os
+import sys
+import numpy as np
+import pytest
+from pathlib import Path
+from PIL import Image
+import tempfile
+import json
+import cv2
+
+# Add parent directory to path
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+
+# Image annotation unit tests can be slow and may require LLM access depending on configuration.
+if os.environ.get("CRCA_RUN_IMAGE_ANNOTATION_TESTS") != "1":
+    pytest.skip("Set CRCA_RUN_IMAGE_ANNOTATION_TESTS=1 to run image annotation tests", allow_module_level=True)
+
+try:
+    from image_annotation.annotation_engine import ImageAnnotationEngine, AnnotationConfig
+    from image_annotation import AnnotationResult, AnnotationGraph, PrimitiveEntity, SemanticLabel
+    IMAGE_ANNOTATION_AVAILABLE = True
+except ImportError:
+    IMAGE_ANNOTATION_AVAILABLE = False
+    pytest.skip("Image annotation not available", allow_module_level=True)
+
+
+class TestImageAnnotationConfig:
+    """Test configuration system."""
+
+    def test_default_config(self):
+        """Test default configuration creation."""
+        config = AnnotationConfig()
+        assert config.gpt_model == "gpt-4o-mini"
+        assert config.cache_enabled is True
+        assert config.auto_retry is True
+        assert config.output_format == "overlay"
+
+    def test_config_from_env(self):
+        """Test loading configuration from environment variables."""
+        import os
+        os.environ["ANNOTATION_GPT_MODEL"] = "gpt-4"
+        os.environ["ANNOTATION_CACHE_ENABLED"] = "false"
+
+        config = AnnotationConfig.from_env()
+        assert config.gpt_model == "gpt-4"
+        assert config.cache_enabled is False
+
+        # Cleanup
+        del os.environ["ANNOTATION_GPT_MODEL"]
+        del os.environ["ANNOTATION_CACHE_ENABLED"]
+
+    def test_config_override(self):
+        """Test configuration parameter override."""
+        config = AnnotationConfig(gpt_model="gpt-4o", cache_enabled=False)
+        assert config.gpt_model == "gpt-4o"
+        assert config.cache_enabled is False
+
+
+class TestInputHandling:
+    """Test smart input handling features."""
+
+    def test_detect_input_type_file_path(self):
+        """Test file path detection."""
+        engine = ImageAnnotationEngine()
+        assert engine._detect_input_type("image.png") == "file_path"
+        assert engine._detect_input_type(Path("image.png")) == "file_path"
+
+    def test_detect_input_type_url(self):
+        """Test URL detection."""
+        engine = ImageAnnotationEngine()
+        assert engine._detect_input_type("https://example.com/image.png") == "url"
+        assert engine._detect_input_type("http://example.com/image.png") == "url"
+
+    def test_detect_input_type_numpy(self):
+        """Test numpy array detection."""
+        engine = ImageAnnotationEngine()
+        arr = np.zeros((100, 100, 3), dtype=np.uint8)
+        assert engine._detect_input_type(arr) == "numpy_array"
+
+    def test_detect_input_type_pil(self):
+        """Test PIL Image detection."""
+        engine = ImageAnnotationEngine()
+        img = Image.new("RGB", (100, 100))
+        assert engine._detect_input_type(img) == "pil_image"
+
+    def test_detect_input_type_batch(self):
+        """Test batch detection."""
+        engine = ImageAnnotationEngine()
+        assert engine._detect_input_type(["img1.png", "img2.png"]) == "batch"
+
+    def test_auto_load_numpy(self):
+        """Test loading numpy array."""
+        engine = ImageAnnotationEngine()
+        arr = np.zeros((100, 100, 3), dtype=np.uint8)
+        loaded = engine._auto_load_input(arr)
+        assert isinstance(loaded, np.ndarray)
+        assert loaded.shape == (100, 100, 3)
+
+    def test_auto_load_pil(self):
+        """Test loading PIL Image."""
+        engine = ImageAnnotationEngine()
+        img = Image.new("RGB", (100, 100), color=(255, 0, 0))
+        loaded = engine._auto_load_input(img)
+        assert isinstance(loaded, np.ndarray)
+        assert len(loaded.shape) == 3  # BGR format
+
+    def test_auto_load_file(self):
+        """Test loading from file path."""
+        engine = ImageAnnotationEngine()
+        # Create temporary image file
+        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
+            img = Image.new("RGB", (100, 100), color=(0, 255, 0))
+            img.save(f.name, "PNG")
+            temp_path = f.name
+
+        try:
+            loaded = engine._auto_load_input(temp_path)
+            assert isinstance(loaded, np.ndarray)
+            assert loaded.shape[0] > 0 and loaded.shape[1] > 0
+        finally:
+            os.unlink(temp_path)
+
+
+class TestImageTypeDetection:
+    """Test image type detection."""
+
+    def test_detect_circuit(self):
+        """Test circuit diagram detection."""
+        engine = ImageAnnotationEngine()
+        # Create synthetic circuit-like image (high line density, circles)
+        img = np.zeros((500, 500, 3), dtype=np.uint8)
+        # Add lines
+        for i in range(0, 500, 20):
+            cv2.line(img, (i, 0), (i, 500), (255, 255, 255), 2)
+        # Add circles
+        for i in range(50, 450, 50):
+            cv2.circle(img, (i, i), 20, (255, 255, 255), 2)
+
+        img_type = engine._detect_image_type(img)
+        # Should detect as circuit or general
+        assert img_type in ["circuit", "general", "technical"]
+
+    def test_detect_architectural(self):
+        """Test architectural drawing detection."""
+        engine = ImageAnnotationEngine()
+        # Create synthetic architectural drawing (many parallel lines)
+        img = np.zeros((800, 800, 3), dtype=np.uint8)
+        # Add many parallel lines
+        for i in range(0, 800, 10):
+            cv2.line(img, (0, i), (800, i), (255, 255, 255), 1)
+            cv2.line(img, (i, 0), (i, 800), (255, 255, 255), 1)
+
+        img_type = engine._detect_image_type(img)
+        # Should detect as architectural or general
+        assert img_type in ["architectural", "general", "technical"]
+
+    def test_get_type_specific_params(self):
+        """Test getting type-specific parameters."""
+        engine = ImageAnnotationEngine()
+        params = engine._get_type_specific_params("circuit")
+        assert "hough_line_threshold" in params
+        assert "hough_circle_threshold" in params
+        assert "preprocessing_strength" in params
+
+
+class TestParameterTuning:
+    """Test automatic parameter tuning."""
+
+    def test_auto_tune_params(self):
+        """Test automatic parameter tuning."""
+        engine = ImageAnnotationEngine()
+        img = np.zeros((500, 500), dtype=np.uint8)
+        # Add some edges
+        cv2.rectangle(img, (100, 100), (400, 400), 255, 2)
+
+        params = engine._auto_tune_params(img)
+        assert isinstance(params, dict)
+        assert "hough_line_threshold" in params
+        assert "canny_low" in params
+        assert "canny_high" in params
+
+    def test_heuristic_tune(self):
+        """Test heuristic-based tuning."""
+        engine = ImageAnnotationEngine()
+        img = np.zeros((200, 200), dtype=np.uint8)
+        cv2.circle(img, (100, 100), 50, 255, 2)
+
+        params = engine._heuristic_tune(img, "circuit")
+        assert isinstance(params, dict)
+        assert params["hough_line_threshold"] > 0
+
+    def test_adaptive_refine(self):
+        """Test adaptive parameter refinement."""
+        engine = ImageAnnotationEngine()
+        img = np.zeros((500, 500), dtype=np.uint8)
+
+        # Create empty result (no primitives)
+        from schemas.annotation import AnnotationGraph, AnnotationResult
+        empty_result = AnnotationResult(
+            annotation_graph=AnnotationGraph(),
+            overlay_image=None,
+            formal_report="",
+            json_output={},
+            processing_time=0.0
+        )
+
+        base_params = {"hough_line_threshold": 100, "hough_circle_threshold": 30}
+        refined = engine._adaptive_refine(img, empty_result, base_params)
+        # Should relax thresholds
+        assert refined["hough_line_threshold"] <= base_params["hough_line_threshold"]
+
+
+class TestRetryLogic:
+    """Test retry logic."""
+
+    def test_should_retry_no_primitives(self):
+        """Test retry decision for no primitives."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import AnnotationGraph, AnnotationResult
+        result = AnnotationResult(
+            annotation_graph=AnnotationGraph(),  # Empty
+            overlay_image=None,
+            formal_report="",
+            json_output={},
+            processing_time=0.0
+        )
+
+        assert engine._should_retry(result, attempt=0) is True
+
+    def test_should_retry_low_confidence(self):
+        """Test retry decision for low confidence."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import AnnotationGraph, AnnotationResult, SemanticLabel, PrimitiveEntity
+
+        entity = PrimitiveEntity(id="test", pixel_coords=[(0, 0)], primitive_type="line")
+        label = SemanticLabel(entity_id="test", label="test", uncertainty=0.9)  # High uncertainty
+
+        graph = AnnotationGraph(entities=[entity], labels=[label])
+        result = AnnotationResult(
+            annotation_graph=graph,
+            overlay_image=None,
+            formal_report="",
+            json_output={},
+            processing_time=0.0
+        )
+
+        assert engine._should_retry(result, attempt=0) is True
+
+    def test_should_retry_max_attempts(self):
+        """Test retry decision when max attempts reached."""
+        engine = ImageAnnotationEngine()
+        engine.config.max_retries = 3
+
+        from schemas.annotation import AnnotationGraph, AnnotationResult
+        result = AnnotationResult(
+            annotation_graph=AnnotationGraph(),
+            overlay_image=None,
+            formal_report="",
+            json_output={},
+            processing_time=0.0
+        )
+
+        assert engine._should_retry(result, attempt=3) is False
+
+    def test_get_retry_params(self):
+        """Test getting retry parameters."""
+        engine = ImageAnnotationEngine()
+        base_params = {"hough_line_threshold": 100, "canny_low": 50}
+
+        retry_params = engine._get_retry_params(attempt=1, previous_result=None, base_params=base_params)
+        # Should relax thresholds
+        assert retry_params["hough_line_threshold"] < base_params["hough_line_threshold"]
+
+
+class TestCaching:
+    """Test smart caching."""
+
+    def test_get_cache_key(self):
+        """Test cache key generation."""
+        engine = ImageAnnotationEngine()
+        # Create actually different images
+        img1 = np.zeros((100, 100, 3), dtype=np.uint8)
+        img1[50, 50] = [255, 255, 255]  # Add a white pixel
+        img2 = np.zeros((100, 100, 3), dtype=np.uint8)
+        img2[50, 50] = [128, 128, 128]  # Add a gray pixel (different)
+        params = {"test": "value"}
+
+        key1 = engine._get_cache_key(img1, params)
+        key2 = engine._get_cache_key(img2, params)
+        key3 = engine._get_cache_key(img1, params)
+
+        # Same image + params should give same key
+        assert key1 == key3
+        # Different images should give different keys (even if same shape)
+        assert key1 != key2
+
+    def test_cache_primitives(self):
+        """Test caching primitives."""
+        engine = ImageAnnotationEngine()
+        engine.config.cache_enabled = True
+
+        from schemas.annotation import PrimitiveEntity
+        primitives = [
+            PrimitiveEntity(id="1", pixel_coords=[(0, 0), (10, 10)], primitive_type="line")
+        ]
+
+        cache_key = "test_key"
+        engine._cache_primitives(cache_key, primitives)
+
+        # Check cache file exists
+        cache_file = engine._cache_dir / f"{cache_key}_primitives.json"
+        assert cache_file.exists()
+
+        # Cleanup
+        if cache_file.exists():
+            cache_file.unlink()
+
+    def test_get_cached_primitives(self):
+        """Test retrieving cached primitives."""
+        engine = ImageAnnotationEngine()
+        engine.config.cache_enabled = True
+
+        from schemas.annotation import PrimitiveEntity
+        primitives = [
+            PrimitiveEntity(id="1", pixel_coords=[(0, 0), (10, 10)], primitive_type="line")
+        ]
+
+        cache_key = "test_key_2"
+        engine._cache_primitives(cache_key, primitives)
+
+        cached = engine._get_cached_primitives(cache_key)
+        assert cached is not None
+        assert len(cached) == 1
+        assert cached[0].id == "1"
+
+        # Cleanup
+        cache_file = engine._cache_dir / f"{cache_key}_primitives.json"
+        if cache_file.exists():
+            cache_file.unlink()
+
+
+class TestPrimitiveExtraction:
+    """Test primitive extraction methods."""
+
+    def test_extract_lines(self):
+        """Test line extraction."""
+        engine = ImageAnnotationEngine()
+        img = np.zeros((500, 500), dtype=np.uint8)
+        cv2.line(img, (0, 0), (500, 500), 255, 2)
+        cv2.line(img, (0, 500), (500, 0), 255, 2)
+
+        params = {"hough_line_threshold": 50, "hough_line_min_length": 10}
+        lines = engine._extract_lines(img, params)
+        assert len(lines) > 0
+
+    def test_extract_circles(self):
+        """Test circle extraction."""
+        engine = ImageAnnotationEngine()
+        img = np.zeros((500, 500), dtype=np.uint8)
+        cv2.circle(img, (250, 250), 50, 255, 2)
+
+        params = {"hough_circle_threshold": 30, "hough_circle_min_radius": 10}
+        circles = engine._extract_circles(img, params)
+        assert len(circles) > 0
+
+    def test_extract_contours(self):
+        """Test contour extraction."""
+        engine = ImageAnnotationEngine()
+        img = np.zeros((500, 500), dtype=np.uint8)
+        cv2.rectangle(img, (100, 100), (400, 400), 255, 2)
+
+        params = {"canny_low": 50, "canny_high": 150}
+        contours = engine._extract_contours(img, params)
+        assert len(contours) > 0
+
+    def test_compute_intersections(self):
+        """Test intersection computation."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import PrimitiveEntity
+
+        # Create two lines that intersect
+        line1 = PrimitiveEntity(
+            id="line1",
+            pixel_coords=[(0, 0), (100, 100)],
+            primitive_type="line"
+        )
+        line2 = PrimitiveEntity(
+            id="line2",
+            pixel_coords=[(0, 100), (100, 0)],
+            primitive_type="line"
+        )
+
+        intersections = engine._compute_intersections([line1, line2])
+        assert len(intersections) > 0
+
+
+class TestPreprocessing:
+    """Test image preprocessing."""
+
+    def test_preprocess_image(self):
+        """Test main preprocessing pipeline."""
+        engine = ImageAnnotationEngine()
+        img = np.random.randint(0, 255, (500, 500, 3), dtype=np.uint8)
+
+        processed = engine._preprocess_image(img)
+        assert processed.shape[0] > 0
+        assert processed.shape[1] > 0
+        assert len(processed.shape) == 2  # Grayscale
+
+    def test_adaptive_histogram_equalization(self):
+        """Test adaptive histogram equalization."""
+        engine = ImageAnnotationEngine()
+        img = np.random.randint(0, 255, (500, 500), dtype=np.uint8)
+
+        equalized = engine._adaptive_histogram_equalization(img)
+        assert equalized.shape == img.shape
+
+    def test_edge_amplification(self):
+        """Test edge amplification."""
+        engine = ImageAnnotationEngine()
+        img = np.random.randint(0, 255, (500, 500), dtype=np.uint8)
+
+        amplified = engine._edge_amplification(img, strength=0.7)
+        assert amplified.shape == img.shape
+
+
+class TestQueryInterface:
+    """Test query/task-based interface."""
+
+    def test_extract_relevant_entities(self):
+        """Test extracting relevant entities from query."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import AnnotationGraph, PrimitiveEntity, SemanticLabel
+
+        entity1 = PrimitiveEntity(id="1", pixel_coords=[(0, 0)], primitive_type="circle")
+        entity2 = PrimitiveEntity(id="2", pixel_coords=[(10, 10), (20, 20)], primitive_type="line")
+        label = SemanticLabel(entity_id="1", label="building", uncertainty=0.3)
+
+        graph = AnnotationGraph(entities=[entity1, entity2], labels=[label])
+
+        relevant = engine._extract_relevant_entities("find the largest building", graph)
+        assert len(relevant) > 0
+
+    def test_get_entity_size(self):
+        """Test entity size calculation."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import PrimitiveEntity
+
+        # Circle
+        circle = PrimitiveEntity(
+            id="circle1",
+            pixel_coords=[(50, 50)],
+            primitive_type="circle",
+            metadata={"radius": 10}
+        )
+        size = engine._get_entity_size(circle)
+        assert size > 0
+
+        # Line
+        line = PrimitiveEntity(
+            id="line1",
+            pixel_coords=[(0, 0), (10, 10)],
+            primitive_type="line"
+        )
+        size = engine._get_entity_size(line)
+        assert size > 0
+
+    def test_perform_query_measurements(self):
+        """Test performing measurements for queries."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import PrimitiveEntity, AnnotationGraph
+
+        circle = PrimitiveEntity(
+            id="circle1",
+            pixel_coords=[(50, 50)],
+            primitive_type="circle",
+            metadata={"radius": 10}
+        )
+        graph = AnnotationGraph(entities=[circle])
+
+        measurements = engine._perform_query_measurements("measure the circle", [circle], graph)
+        assert "circle1" in measurements
+        assert "radius" in measurements["circle1"]
+        assert "area" in measurements["circle1"]
+
+
+class TestOutputFormatting:
+    """Test output formatting."""
+
+    def test_format_output_overlay(self):
+        """Test formatting output as overlay."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import AnnotationGraph, AnnotationResult
+        import cv2
+
+        overlay_img = np.zeros((100, 100, 3), dtype=np.uint8)
+        _, buffer = cv2.imencode('.jpg', overlay_img)
+        overlay_bytes = buffer.tobytes()
+
+        result = AnnotationResult(
+            annotation_graph=AnnotationGraph(),
+            overlay_image=overlay_bytes,
+            formal_report="Test report",
+            json_output={"test": "data"},
+            processing_time=1.0
+        )
+
+        formatted = engine._format_output(result, "overlay")
+        assert isinstance(formatted, np.ndarray)
+
+    def test_format_output_json(self):
+        """Test formatting output as JSON."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import AnnotationGraph, AnnotationResult
+
+        result = AnnotationResult(
+            annotation_graph=AnnotationGraph(),
+            overlay_image=None,
+            formal_report="Test report",
+            json_output={"test": "data"},
+            processing_time=1.0
+        )
+
+        formatted = engine._format_output(result, "json")
+        assert isinstance(formatted, dict)
+        assert "test" in formatted
+
+    def test_format_output_report(self):
+        """Test formatting output as report."""
+        engine = ImageAnnotationEngine()
+        from schemas.annotation import AnnotationGraph, AnnotationResult
+
+        result = AnnotationResult(
+            annotation_graph=AnnotationGraph(),
+            overlay_image=None,
+            formal_report="Test report content",
+            json_output={},
+            processing_time=1.0
+        )
+
+        formatted = engine._format_output(result, "report")
+        assert isinstance(formatted, str)
+        assert "Test report" in formatted
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
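The module above is gated behind an environment variable and is skipped in a default test run. A minimal sketch of how to opt in locally, assuming a source checkout with pytest, numpy, opencv-python, and Pillow installed; it only uses the gate and the pytest entry point that appear in the file itself:

import os
import pytest

# Opt into the gated image-annotation unit tests (see the module-level skip above),
# then run just that file verbosely. The path assumes the repository root as the working directory.
os.environ["CRCA_RUN_IMAGE_ANNOTATION_TESTS"] = "1"
pytest.main(["tests/test_image_annotation_unit.py", "-v"])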
tests/test_training_moe.py
ADDED
@@ -0,0 +1,13 @@
+from training.finetune import _resolve_model_info
+
+
+def test_resolve_model_info_switch_is_seq2seq() -> None:
+    info = _resolve_model_info("google/switch-base-8")
+    assert info["arch"] == "seq2seq"
+    assert info["moe"] is True
+
+
+def test_resolve_model_info_qwen_is_causal() -> None:
+    info = _resolve_model_info("Qwen/Qwen2.5-1.5B-Instruct")
+    assert info["arch"] == "causal"
+    assert info["moe"] is False
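These tests pin down the contract of training.finetune._resolve_model_info: Switch checkpoints resolve to a seq2seq MoE architecture, Qwen2.5 checkpoints to a dense causal LM. The real implementation is not part of this diff; the following is only an illustrative sketch that would satisfy the two assertions above (the function name and structure here are assumptions, not the package's code):

def resolve_model_info_sketch(model_name: str) -> dict:
    """Hypothetical resolver mirroring what the tests assert, not the actual training.finetune code."""
    name = model_name.lower()
    if "switch" in name:
        # Google's Switch Transformers are encoder-decoder mixture-of-experts models.
        return {"arch": "seq2seq", "moe": True}
    # Treat other checkpoints (e.g. Qwen2.5) as dense decoder-only models.
    return {"arch": "causal", "moe": False}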
training/__init__.py
ADDED
@@ -0,0 +1,42 @@
+"""Training utilities for LRM finetuning."""
+
+from training.datasets import (
+    ReActExample,
+    examples_from_traces,
+    filter_examples,
+    merge_examples,
+    plan_result_to_examples,
+    save_jsonl,
+)
+from training.finetune import (
+    FinetuneConfig,
+    full_finetune_qwen25_0_5b_config,
+    full_finetune_qwen25_0_5b_config_cloud,
+    full_finetune_qwen25_1_5b_config,
+    full_finetune_qwen25_7b_config,
+    full_finetune_qwen25_14b_config,
+    full_finetune_switch_base_8_config,
+    full_finetune_switch_large_16_config,
+    run_finetune,
+)
+from training.eval import EvalConfig, run_eval
+
+__all__ = [
+    "ReActExample",
+    "examples_from_traces",
+    "filter_examples",
+    "merge_examples",
+    "plan_result_to_examples",
+    "save_jsonl",
+    "FinetuneConfig",
+    "full_finetune_qwen25_0_5b_config",
+    "full_finetune_qwen25_0_5b_config_cloud",
+    "full_finetune_qwen25_1_5b_config",
+    "full_finetune_qwen25_7b_config",
+    "full_finetune_qwen25_14b_config",
+    "full_finetune_switch_base_8_config",
+    "full_finetune_switch_large_16_config",
+    "run_finetune",
+    "EvalConfig",
+    "run_eval",
+]
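training/__init__.py simply re-exports the dataset helpers, the finetune config factories, and the eval entry points. A hedged usage sketch based only on the names re-exported above; the actual signatures of the config factories and run_finetune live in training/finetune.py and are not shown in this diff:

from training import full_finetune_qwen25_0_5b_config, run_finetune

# Assumption: the factory returns a FinetuneConfig and run_finetune accepts it directly;
# see training/finetune.py (and scripts/run_full_finetune.py) in the wheel for the real signatures.
config = full_finetune_qwen25_0_5b_config()
run_finetune(config)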