crca-1.4.0-py3-none-any.whl → crca-1.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (306)
  1. CRCA.py +172 -7
  2. MODEL_CARD.md +53 -0
  3. PKG-INFO +8 -2
  4. RELEASE_NOTES.md +17 -0
  5. STABILITY.md +19 -0
  6. architecture/hybrid/consistency_engine.py +362 -0
  7. architecture/hybrid/conversation_manager.py +421 -0
  8. architecture/hybrid/explanation_generator.py +452 -0
  9. architecture/hybrid/few_shot_learner.py +533 -0
  10. architecture/hybrid/graph_compressor.py +286 -0
  11. architecture/hybrid/hybrid_agent.py +4398 -0
  12. architecture/hybrid/language_compiler.py +623 -0
  13. architecture/hybrid/main,py +0 -0
  14. architecture/hybrid/reasoning_tracker.py +322 -0
  15. architecture/hybrid/self_verifier.py +524 -0
  16. architecture/hybrid/task_decomposer.py +567 -0
  17. architecture/hybrid/text_corrector.py +341 -0
  18. benchmark_results/crca_core_benchmarks.json +178 -0
  19. branches/crca_sd/crca_sd_realtime.py +6 -2
  20. branches/general_agent/__init__.py +102 -0
  21. branches/general_agent/general_agent.py +1400 -0
  22. branches/general_agent/personality.py +169 -0
  23. branches/general_agent/utils/__init__.py +19 -0
  24. branches/general_agent/utils/prompt_builder.py +170 -0
  25. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/METADATA +8 -2
  26. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/RECORD +303 -20
  27. crca_core/__init__.py +35 -0
  28. crca_core/benchmarks/__init__.py +14 -0
  29. crca_core/benchmarks/synthetic_scm.py +103 -0
  30. crca_core/core/__init__.py +23 -0
  31. crca_core/core/api.py +120 -0
  32. crca_core/core/estimate.py +208 -0
  33. crca_core/core/godclass.py +72 -0
  34. crca_core/core/intervention_design.py +174 -0
  35. crca_core/core/lifecycle.py +48 -0
  36. crca_core/discovery/__init__.py +9 -0
  37. crca_core/discovery/tabular.py +193 -0
  38. crca_core/identify/__init__.py +171 -0
  39. crca_core/identify/backdoor.py +39 -0
  40. crca_core/identify/frontdoor.py +48 -0
  41. crca_core/identify/graph.py +106 -0
  42. crca_core/identify/id_algorithm.py +43 -0
  43. crca_core/identify/iv.py +48 -0
  44. crca_core/models/__init__.py +67 -0
  45. crca_core/models/provenance.py +56 -0
  46. crca_core/models/refusal.py +39 -0
  47. crca_core/models/result.py +83 -0
  48. crca_core/models/spec.py +151 -0
  49. crca_core/models/validation.py +68 -0
  50. crca_core/scm/__init__.py +9 -0
  51. crca_core/scm/linear_gaussian.py +198 -0
  52. crca_core/timeseries/__init__.py +6 -0
  53. crca_core/timeseries/pcmci.py +181 -0
  54. crca_llm/__init__.py +12 -0
  55. crca_llm/client.py +85 -0
  56. crca_llm/coauthor.py +118 -0
  57. crca_llm/orchestrator.py +289 -0
  58. crca_llm/types.py +21 -0
  59. crca_reasoning/__init__.py +16 -0
  60. crca_reasoning/critique.py +54 -0
  61. crca_reasoning/godclass.py +206 -0
  62. crca_reasoning/memory.py +24 -0
  63. crca_reasoning/rationale.py +10 -0
  64. crca_reasoning/react_controller.py +81 -0
  65. crca_reasoning/tool_router.py +97 -0
  66. crca_reasoning/types.py +40 -0
  67. crca_sd/__init__.py +15 -0
  68. crca_sd/crca_sd_core.py +2 -0
  69. crca_sd/crca_sd_governance.py +2 -0
  70. crca_sd/crca_sd_mpc.py +2 -0
  71. crca_sd/crca_sd_realtime.py +2 -0
  72. crca_sd/crca_sd_tui.py +2 -0
  73. cuda-keyring_1.1-1_all.deb +0 -0
  74. cuda-keyring_1.1-1_all.deb.1 +0 -0
  75. docs/IMAGE_ANNOTATION_USAGE.md +539 -0
  76. docs/INSTALL_DEEPSPEED.md +125 -0
  77. docs/api/branches/crca-cg.md +19 -0
  78. docs/api/branches/crca-q.md +27 -0
  79. docs/api/branches/crca-sd.md +37 -0
  80. docs/api/branches/general-agent.md +24 -0
  81. docs/api/branches/overview.md +19 -0
  82. docs/api/crca/agent-methods.md +62 -0
  83. docs/api/crca/operations.md +79 -0
  84. docs/api/crca/overview.md +32 -0
  85. docs/api/image-annotation/engine.md +52 -0
  86. docs/api/image-annotation/overview.md +17 -0
  87. docs/api/schemas/annotation.md +34 -0
  88. docs/api/schemas/core-schemas.md +82 -0
  89. docs/api/schemas/overview.md +32 -0
  90. docs/api/schemas/policy.md +30 -0
  91. docs/api/utils/conversation.md +22 -0
  92. docs/api/utils/graph-reasoner.md +32 -0
  93. docs/api/utils/overview.md +21 -0
  94. docs/api/utils/router.md +19 -0
  95. docs/api/utils/utilities.md +97 -0
  96. docs/architecture/causal-graphs.md +41 -0
  97. docs/architecture/data-flow.md +29 -0
  98. docs/architecture/design-principles.md +33 -0
  99. docs/architecture/hybrid-agent/components.md +38 -0
  100. docs/architecture/hybrid-agent/consistency.md +26 -0
  101. docs/architecture/hybrid-agent/overview.md +44 -0
  102. docs/architecture/hybrid-agent/reasoning.md +22 -0
  103. docs/architecture/llm-integration.md +26 -0
  104. docs/architecture/modular-structure.md +37 -0
  105. docs/architecture/overview.md +69 -0
  106. docs/architecture/policy-engine-arch.md +29 -0
  107. docs/branches/crca-cg/corposwarm.md +39 -0
  108. docs/branches/crca-cg/esg-scoring.md +30 -0
  109. docs/branches/crca-cg/multi-agent.md +35 -0
  110. docs/branches/crca-cg/overview.md +40 -0
  111. docs/branches/crca-q/alternative-data.md +55 -0
  112. docs/branches/crca-q/architecture.md +71 -0
  113. docs/branches/crca-q/backtesting.md +45 -0
  114. docs/branches/crca-q/causal-engine.md +33 -0
  115. docs/branches/crca-q/execution.md +39 -0
  116. docs/branches/crca-q/market-data.md +60 -0
  117. docs/branches/crca-q/overview.md +58 -0
  118. docs/branches/crca-q/philosophy.md +60 -0
  119. docs/branches/crca-q/portfolio-optimization.md +66 -0
  120. docs/branches/crca-q/risk-management.md +102 -0
  121. docs/branches/crca-q/setup.md +65 -0
  122. docs/branches/crca-q/signal-generation.md +61 -0
  123. docs/branches/crca-q/signal-validation.md +43 -0
  124. docs/branches/crca-sd/core.md +84 -0
  125. docs/branches/crca-sd/governance.md +53 -0
  126. docs/branches/crca-sd/mpc-solver.md +65 -0
  127. docs/branches/crca-sd/overview.md +59 -0
  128. docs/branches/crca-sd/realtime.md +28 -0
  129. docs/branches/crca-sd/tui.md +20 -0
  130. docs/branches/general-agent/overview.md +37 -0
  131. docs/branches/general-agent/personality.md +36 -0
  132. docs/branches/general-agent/prompt-builder.md +30 -0
  133. docs/changelog/index.md +79 -0
  134. docs/contributing/code-style.md +69 -0
  135. docs/contributing/documentation.md +43 -0
  136. docs/contributing/overview.md +29 -0
  137. docs/contributing/testing.md +29 -0
  138. docs/core/crcagent/async-operations.md +65 -0
  139. docs/core/crcagent/automatic-extraction.md +107 -0
  140. docs/core/crcagent/batch-prediction.md +80 -0
  141. docs/core/crcagent/bayesian-inference.md +60 -0
  142. docs/core/crcagent/causal-graph.md +92 -0
  143. docs/core/crcagent/counterfactuals.md +96 -0
  144. docs/core/crcagent/deterministic-simulation.md +78 -0
  145. docs/core/crcagent/dual-mode-operation.md +82 -0
  146. docs/core/crcagent/initialization.md +88 -0
  147. docs/core/crcagent/optimization.md +65 -0
  148. docs/core/crcagent/overview.md +63 -0
  149. docs/core/crcagent/time-series.md +57 -0
  150. docs/core/schemas/annotation.md +30 -0
  151. docs/core/schemas/core-schemas.md +82 -0
  152. docs/core/schemas/overview.md +30 -0
  153. docs/core/schemas/policy.md +41 -0
  154. docs/core/templates/base-agent.md +31 -0
  155. docs/core/templates/feature-mixins.md +31 -0
  156. docs/core/templates/overview.md +29 -0
  157. docs/core/templates/templates-guide.md +75 -0
  158. docs/core/tools/mcp-client.md +34 -0
  159. docs/core/tools/overview.md +24 -0
  160. docs/core/utils/conversation.md +27 -0
  161. docs/core/utils/graph-reasoner.md +29 -0
  162. docs/core/utils/overview.md +27 -0
  163. docs/core/utils/router.md +27 -0
  164. docs/core/utils/utilities.md +97 -0
  165. docs/css/custom.css +84 -0
  166. docs/examples/basic-usage.md +57 -0
  167. docs/examples/general-agent/general-agent-examples.md +50 -0
  168. docs/examples/hybrid-agent/hybrid-agent-examples.md +56 -0
  169. docs/examples/image-annotation/image-annotation-examples.md +54 -0
  170. docs/examples/integration/integration-examples.md +58 -0
  171. docs/examples/overview.md +37 -0
  172. docs/examples/trading/trading-examples.md +46 -0
  173. docs/features/causal-reasoning/advanced-topics.md +101 -0
  174. docs/features/causal-reasoning/counterfactuals.md +43 -0
  175. docs/features/causal-reasoning/do-calculus.md +50 -0
  176. docs/features/causal-reasoning/overview.md +47 -0
  177. docs/features/causal-reasoning/structural-models.md +52 -0
  178. docs/features/hybrid-agent/advanced-components.md +55 -0
  179. docs/features/hybrid-agent/core-components.md +64 -0
  180. docs/features/hybrid-agent/overview.md +34 -0
  181. docs/features/image-annotation/engine.md +82 -0
  182. docs/features/image-annotation/features.md +113 -0
  183. docs/features/image-annotation/integration.md +75 -0
  184. docs/features/image-annotation/overview.md +53 -0
  185. docs/features/image-annotation/quickstart.md +73 -0
  186. docs/features/policy-engine/doctrine-ledger.md +105 -0
  187. docs/features/policy-engine/monitoring.md +44 -0
  188. docs/features/policy-engine/mpc-control.md +89 -0
  189. docs/features/policy-engine/overview.md +46 -0
  190. docs/getting-started/configuration.md +225 -0
  191. docs/getting-started/first-agent.md +164 -0
  192. docs/getting-started/installation.md +144 -0
  193. docs/getting-started/quickstart.md +137 -0
  194. docs/index.md +118 -0
  195. docs/js/mathjax.js +13 -0
  196. docs/lrm/discovery_proof_notes.md +25 -0
  197. docs/lrm/finetune_full.md +83 -0
  198. docs/lrm/math_appendix.md +120 -0
  199. docs/lrm/overview.md +32 -0
  200. docs/mkdocs.yml +238 -0
  201. docs/stylesheets/extra.css +21 -0
  202. docs_generated/crca_core/CounterfactualResult.md +12 -0
  203. docs_generated/crca_core/DiscoveryHypothesisResult.md +13 -0
  204. docs_generated/crca_core/DraftSpec.md +13 -0
  205. docs_generated/crca_core/EstimateResult.md +13 -0
  206. docs_generated/crca_core/IdentificationResult.md +17 -0
  207. docs_generated/crca_core/InterventionDesignResult.md +12 -0
  208. docs_generated/crca_core/LockedSpec.md +15 -0
  209. docs_generated/crca_core/RefusalResult.md +12 -0
  210. docs_generated/crca_core/ValidationReport.md +9 -0
  211. docs_generated/crca_core/index.md +13 -0
  212. examples/general_agent_example.py +277 -0
  213. examples/general_agent_quickstart.py +202 -0
  214. examples/general_agent_simple.py +92 -0
  215. examples/hybrid_agent_auto_extraction.py +84 -0
  216. examples/hybrid_agent_dictionary_demo.py +104 -0
  217. examples/hybrid_agent_enhanced.py +179 -0
  218. examples/hybrid_agent_general_knowledge.py +107 -0
  219. examples/image_annotation_quickstart.py +328 -0
  220. examples/test_hybrid_fixes.py +77 -0
  221. image_annotation/__init__.py +27 -0
  222. image_annotation/annotation_engine.py +2593 -0
  223. install_cuda_wsl2.sh +59 -0
  224. install_deepspeed.sh +56 -0
  225. install_deepspeed_simple.sh +87 -0
  226. mkdocs.yml +252 -0
  227. ollama/Modelfile +8 -0
  228. prompts/__init__.py +2 -1
  229. prompts/default_crca.py +9 -1
  230. prompts/general_agent.py +227 -0
  231. prompts/image_annotation.py +56 -0
  232. pyproject.toml +17 -2
  233. requirements-docs.txt +10 -0
  234. requirements.txt +21 -2
  235. schemas/__init__.py +26 -1
  236. schemas/annotation.py +222 -0
  237. schemas/conversation.py +193 -0
  238. schemas/hybrid.py +211 -0
  239. schemas/reasoning.py +276 -0
  240. schemas_export/crca_core/CounterfactualResult.schema.json +108 -0
  241. schemas_export/crca_core/DiscoveryHypothesisResult.schema.json +113 -0
  242. schemas_export/crca_core/DraftSpec.schema.json +635 -0
  243. schemas_export/crca_core/EstimateResult.schema.json +113 -0
  244. schemas_export/crca_core/IdentificationResult.schema.json +145 -0
  245. schemas_export/crca_core/InterventionDesignResult.schema.json +111 -0
  246. schemas_export/crca_core/LockedSpec.schema.json +646 -0
  247. schemas_export/crca_core/RefusalResult.schema.json +90 -0
  248. schemas_export/crca_core/ValidationReport.schema.json +62 -0
  249. scripts/build_lrm_dataset.py +80 -0
  250. scripts/export_crca_core_schemas.py +54 -0
  251. scripts/export_hf_lrm.py +37 -0
  252. scripts/export_ollama_gguf.py +45 -0
  253. scripts/generate_changelog.py +157 -0
  254. scripts/generate_crca_core_docs_from_schemas.py +86 -0
  255. scripts/run_crca_core_benchmarks.py +163 -0
  256. scripts/run_full_finetune.py +198 -0
  257. scripts/run_lrm_eval.py +31 -0
  258. templates/graph_management.py +29 -0
  259. tests/conftest.py +9 -0
  260. tests/test_core.py +2 -3
  261. tests/test_crca_core_discovery_tabular.py +15 -0
  262. tests/test_crca_core_estimate_dowhy.py +36 -0
  263. tests/test_crca_core_identify.py +18 -0
  264. tests/test_crca_core_intervention_design.py +36 -0
  265. tests/test_crca_core_linear_gaussian_scm.py +69 -0
  266. tests/test_crca_core_spec.py +25 -0
  267. tests/test_crca_core_timeseries_pcmci.py +15 -0
  268. tests/test_crca_llm_coauthor.py +12 -0
  269. tests/test_crca_llm_orchestrator.py +80 -0
  270. tests/test_hybrid_agent_llm_enhanced.py +556 -0
  271. tests/test_image_annotation_demo.py +376 -0
  272. tests/test_image_annotation_operational.py +408 -0
  273. tests/test_image_annotation_unit.py +551 -0
  274. tests/test_training_moe.py +13 -0
  275. training/__init__.py +42 -0
  276. training/datasets.py +140 -0
  277. training/deepspeed_zero2_0_5b.json +22 -0
  278. training/deepspeed_zero2_1_5b.json +22 -0
  279. training/deepspeed_zero3_0_5b.json +28 -0
  280. training/deepspeed_zero3_14b.json +28 -0
  281. training/deepspeed_zero3_h100_3gpu.json +20 -0
  282. training/deepspeed_zero3_offload.json +28 -0
  283. training/eval.py +92 -0
  284. training/finetune.py +516 -0
  285. training/public_datasets.py +89 -0
  286. training_data/react_train.jsonl +7473 -0
  287. utils/agent_discovery.py +311 -0
  288. utils/batch_processor.py +317 -0
  289. utils/conversation.py +78 -0
  290. utils/edit_distance.py +118 -0
  291. utils/formatter.py +33 -0
  292. utils/graph_reasoner.py +530 -0
  293. utils/rate_limiter.py +283 -0
  294. utils/router.py +2 -2
  295. utils/tool_discovery.py +307 -0
  296. webui/__init__.py +10 -0
  297. webui/app.py +229 -0
  298. webui/config.py +104 -0
  299. webui/static/css/style.css +332 -0
  300. webui/static/js/main.js +284 -0
  301. webui/templates/index.html +42 -0
  302. tests/test_crca_excel.py +0 -166
  303. tests/test_data_broker.py +0 -424
  304. tests/test_palantir.py +0 -349
  305. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/WHEEL +0 -0
  306. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/licenses/LICENSE +0 -0

examples/image_annotation_quickstart.py
@@ -0,0 +1,328 @@
+ """
+ Quick Start Guide for Image Annotation
+
+ This example demonstrates the most common use cases for the image annotation system.
+ """
+
+ import os
+ import sys
+ from pathlib import Path
+
+ # Add parent directory to path if needed
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ try:
+     from image_annotation import ImageAnnotationEngine
+     from CRCA import CRCAAgent
+     IMAGE_ANNOTATION_AVAILABLE = True
+ except ImportError as e:
+     print(f"Image annotation not available: {e}")
+     print("Make sure all dependencies are installed:")
+     print(" pip install opencv-python numpy pillow loguru rustworkx")
+     IMAGE_ANNOTATION_AVAILABLE = False
+
+
+ def example_1_basic_annotation():
+     """Example 1: Basic image annotation."""
+     print("\n=== Example 1: Basic Image Annotation ===")
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("Skipping - image annotation not available")
+         return
+
+     # Initialize the engine
+     engine = ImageAnnotationEngine()
+
+     # Example: Annotate an image file
+     # Replace with your image path
+     image_path = "path/to/your/image.png"
+
+     if not os.path.exists(image_path):
+         print(f"Image not found: {image_path}")
+         print("Please update image_path with a valid image file")
+         return
+
+     # Annotate the image
+     result = engine.annotate(image_path, output="all")
+
+     # Access results
+     print(f"Found {len(result.annotation_graph.entities)} geometric primitives")
+     print(f"Generated {len(result.annotation_graph.labels)} semantic labels")
+     print(f"Processing time: {result.processing_time:.2f} seconds")
+
+     # Print some labels
+     print("\nLabels:")
+     for label in result.annotation_graph.labels[:5]: # First 5 labels
+         print(f" - {label.label} (uncertainty: {label.uncertainty:.2f})")
+
+     # Save overlay image
+     if result.overlay_image:
+         import cv2
+         import numpy as np
+         overlay = cv2.imdecode(
+             np.frombuffer(result.overlay_image, np.uint8),
+             cv2.IMREAD_COLOR
+         )
+         cv2.imwrite("annotated_output.png", overlay)
+         print("\nSaved annotated image to: annotated_output.png")
+
+
+ def example_2_query_image():
+     """Example 2: Query an image with natural language."""
+     print("\n=== Example 2: Query Image ===")
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("Skipping - image annotation not available")
+         return
+
+     engine = ImageAnnotationEngine()
+
+     image_path = "path/to/your/image.png"
+
+     if not os.path.exists(image_path):
+         print(f"Image not found: {image_path}")
+         print("Please update image_path with a valid image file")
+         return
+
+     # Query 1: Find specific objects
+     print("\nQuery 1: Find all circles")
+     result = engine.query(image_path, "find all circles")
+     print(f"Answer: {result['answer']}")
+     print(f"Found {len(result['entities'])} circles")
+
+     # Query 2: Measure something
+     print("\nQuery 2: Measure distances")
+     result = engine.query(image_path, "measure the distance from the border to the largest structure")
+     print(f"Answer: {result['answer']}")
+     if result.get('measurements'):
+         print(f"Measurements: {result['measurements']}")
+
+     # Query 3: Identify objects
+     print("\nQuery 3: Identify components")
+     result = engine.query(image_path, "identify all components in this image")
+     print(f"Answer: {result['answer']}")
+
+
+ def example_3_different_output_formats():
+     """Example 3: Using different output formats."""
+     print("\n=== Example 3: Different Output Formats ===")
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("Skipping - image annotation not available")
+         return
+
+     engine = ImageAnnotationEngine()
+
+     image_path = "path/to/your/image.png"
+
+     if not os.path.exists(image_path):
+         print(f"Image not found: {image_path}")
+         print("Please update image_path with a valid image file")
+         return
+
+     # Output format: overlay (numpy array)
+     print("\n1. Getting overlay image (numpy array)...")
+     overlay = engine.annotate(image_path, output="overlay")
+     print(f" Overlay shape: {overlay.shape}")
+     import cv2
+     cv2.imwrite("overlay_output.png", overlay)
+     print(" Saved to: overlay_output.png")
+
+     # Output format: JSON
+     print("\n2. Getting JSON data...")
+     json_data = engine.annotate(image_path, output="json")
+     print(f" JSON keys: {list(json_data.keys())}")
+     print(f" Number of entities: {len(json_data.get('entities', []))}")
+
+     # Output format: report
+     print("\n3. Getting formal report...")
+     report = engine.annotate(image_path, output="report")
+     print(f" Report length: {len(report)} characters")
+     print(f" First 200 chars: {report[:200]}...")
+
+     # Output format: all (complete result)
+     print("\n4. Getting complete result...")
+     result = engine.annotate(image_path, output="all")
+     print(f" Type: {type(result)}")
+     print(f" Has annotation_graph: {hasattr(result, 'annotation_graph')}")
+     print(f" Has overlay_image: {hasattr(result, 'overlay_image')}")
+     print(f" Has formal_report: {hasattr(result, 'formal_report')}")
+
+
+ def example_4_batch_processing():
+     """Example 4: Batch processing multiple images."""
+     print("\n=== Example 4: Batch Processing ===")
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("Skipping - image annotation not available")
+         return
+
+     engine = ImageAnnotationEngine(
+         cache_enabled=True, # Enable caching for faster processing
+         show_progress=True # Show progress bar (requires tqdm)
+     )
+
+     # List of image paths
+     image_paths = [
+         "path/to/image1.png",
+         "path/to/image2.png",
+         "path/to/image3.png"
+     ]
+
+     # Filter to only existing files
+     existing_paths = [p for p in image_paths if os.path.exists(p)]
+
+     if not existing_paths:
+         print("No image files found. Please update image_paths with valid files")
+         return
+
+     print(f"Processing {len(existing_paths)} images...")
+
+     # Process all images
+     results = engine.annotate(existing_paths, output="all")
+
+     # Analyze results
+     print(f"\nProcessed {len(results)} images:")
+     for i, result in enumerate(results):
+         print(f" Image {i+1}: {len(result.annotation_graph.entities)} entities, "
+               f"{len(result.annotation_graph.labels)} labels, "
+               f"{result.processing_time:.2f}s")
+
+
+ def example_5_with_crca_agent():
+     """Example 5: Using image annotation with CRCAAgent."""
+     print("\n=== Example 5: Integration with CRCAAgent ===")
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("Skipping - image annotation not available")
+         return
+
+     # Check for API key
+     if not os.getenv("OPENAI_API_KEY"):
+         print("Warning: OPENAI_API_KEY not set. Set it to use CRCAAgent.")
+         print(" export OPENAI_API_KEY='your-key-here'")
+         return
+
+     # Create agent with image annotation enabled
+     agent = CRCAAgent(
+         model_name="gpt-4o-mini",
+         use_image_annotation=True, # Enable image annotation tools
+         use_crca_tools=True
+     )
+
+     image_path = "path/to/your/image.png"
+
+     if not os.path.exists(image_path):
+         print(f"Image not found: {image_path}")
+         print("Please update image_path with a valid image file")
+         return
+
+     # Task that uses image annotation
+     task = f"""
+     Analyze the image at {image_path}:
+     1. Use query_image to identify all objects in the image
+     2. Use query_image to find the largest object
+     3. Use query_image to measure distances between key objects
+     4. Summarize your findings
+     """
+
+     print("Running agent with image annotation tools...")
+     print("(This will make API calls to OpenAI)")
+
+     # Uncomment to actually run (requires API key and credits)
+     # response = agent.run(task)
+     # print(f"\nAgent response:\n{response}")
+
+     print("\nNote: Uncomment the code above to actually run the agent")
+
+
+ def example_6_different_input_types():
+     """Example 6: Using different input types."""
+     print("\n=== Example 6: Different Input Types ===")
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("Skipping - image annotation not available")
+         return
+
+     engine = ImageAnnotationEngine()
+
+     # 1. File path (string)
+     image_path = "path/to/your/image.png"
+     if os.path.exists(image_path):
+         print("1. Annotating from file path (string)...")
+         result = engine.annotate(image_path, output="json")
+         print(f" Success: {len(result.get('entities', []))} entities")
+
+     # 2. Path object
+     image_path_obj = Path("path/to/your/image.png")
+     if image_path_obj.exists():
+         print("\n2. Annotating from Path object...")
+         result = engine.annotate(image_path_obj, output="json")
+         print(f" Success: {len(result.get('entities', []))} entities")
+
+     # 3. NumPy array
+     try:
+         import cv2
+         import numpy as np
+         if os.path.exists(image_path):
+             print("\n3. Annotating from NumPy array...")
+             img = cv2.imread(image_path)
+             result = engine.annotate(img, output="json")
+             print(f" Success: {len(result.get('entities', []))} entities")
+     except ImportError:
+         print("\n3. Skipping NumPy array example (cv2 not available)")
+
+     # 4. PIL Image
+     try:
+         from PIL import Image as PILImage
+         if os.path.exists(image_path):
+             print("\n4. Annotating from PIL Image...")
+             img = PILImage.open(image_path)
+             result = engine.annotate(img, output="json")
+             print(f" Success: {len(result.get('entities', []))} entities")
+     except ImportError:
+         print("\n4. Skipping PIL Image example (PIL not available)")
+
+     # 5. URL (if requests is available)
+     try:
+         import requests
+         print("\n5. Annotating from URL...")
+         url = "https://example.com/image.png" # Replace with actual URL
+         # Uncomment to test:
+         # result = engine.annotate(url, output="json")
+         # print(f" Success: {len(result.get('entities', []))} entities")
+         print(" (Skipped - update URL to test)")
+     except ImportError:
+         print("\n5. Skipping URL example (requests not available)")
+
+
+ def main():
+     """Run all examples."""
+     print("=" * 60)
+     print("Image Annotation Quick Start Examples")
+     print("=" * 60)
+
+     if not IMAGE_ANNOTATION_AVAILABLE:
+         print("\nImage annotation is not available.")
+         print("Please install required dependencies:")
+         print(" pip install opencv-python numpy pillow loguru rustworkx")
+         return
+
+     # Run examples
+     example_1_basic_annotation()
+     example_2_query_image()
+     example_3_different_output_formats()
+     example_4_batch_processing()
+     example_5_with_crca_agent()
+     example_6_different_input_types()
+
+     print("\n" + "=" * 60)
+     print("Examples completed!")
+     print("=" * 60)
+     print("\nFor more information, see:")
+     print(" - docs/IMAGE_ANNOTATION_USAGE.md")
+     print(" - tests/test_image_annotation_*.py")
+
+
+ if __name__ == "__main__":
+     main()

examples/test_hybrid_fixes.py
@@ -0,0 +1,77 @@
+ """
+ Test script to verify hybrid agent fixes for variable cleaning and state usage.
+ """
+
+ import sys
+ import os
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+ from architecture.hybrid.hybrid_agent import HybridAgent
+ import json
+
+ def test_extraction():
+     """Test variable extraction and cleaning."""
+     agent = HybridAgent()
+     task = "If product price is 20000 & demand is 61% buy, 39% sell, what is the expected price of the product in 7 days?"
+
+     print("=" * 70)
+     print("Testing Variable Extraction and Cleaning")
+     print("=" * 70)
+
+     result = agent.extract_causal_variables(task)
+
+     print(f"\nVariables extracted: {result['variables']}")
+     print(f"Edges extracted: {result['edges']}")
+     print(f"Values extracted: {result['metadata']['variables_with_values']}")
+
+     # Check for invalid variables
+     invalid_vars = ['if', 'sell', 'buy', 'days', 'day']
+     found_invalid = [v for v in result['variables'] if any(inv in v.lower() for inv in invalid_vars)]
+     if found_invalid:
+         print(f"\nWARNING: Found potentially invalid variables: {found_invalid}")
+     else:
+         print("\n✓ No invalid variables found")
+
+     return result
+
+ def test_full_analysis():
+     """Test full analysis with factual state."""
+     agent = HybridAgent()
+     task = "If product price is 20000 & demand is 61% buy, 39% sell, what is the expected price of the product in 7 days?"
+
+     print("\n" + "=" * 70)
+     print("Testing Full Analysis with Factual State")
+     print("=" * 70)
+
+     result_dict = agent.orchestrator.reason_hybrid(task)
+
+     print(f"\nVariables in analysis: {result_dict['analysis']['variables']}")
+     print(f"Number of relationships: {len(result_dict['analysis']['relationships'])}")
+     print(f"Factual state: {result_dict['analysis'].get('factual_state', {})}")
+
+     # Check if factual state has non-zero values
+     factual_state = result_dict['analysis'].get('factual_state', {})
+     non_zero = {k: v for k, v in factual_state.items() if v != 0.0}
+     if non_zero:
+         print(f"\n✓ Factual state has extracted values: {non_zero}")
+     else:
+         print("\nWARNING: Factual state has no extracted values (all zeros)")
+
+     # Check counterfactuals
+     if result_dict.get('counterfactuals'):
+         cf = result_dict['counterfactuals'][0]
+         outcomes = cf.get('expected_outcomes', {})
+         non_zero_outcomes = {k: v for k, v in outcomes.items() if v != 0.0}
+         if non_zero_outcomes:
+             print(f"✓ Counterfactuals have non-zero predictions: {len(non_zero_outcomes)} variables")
+         else:
+             print("WARNING: Counterfactuals show all zeros")
+
+     return result_dict
+
+ if __name__ == "__main__":
+     test_extraction()
+     test_full_analysis()
+     print("\n" + "=" * 70)
+     print("Test Complete")
+     print("=" * 70)
1
+ """Image Annotation module for CR-CA.
2
+
3
+ This module provides live image annotation with GPT-4o-mini under adversarial constraints.
4
+ """
5
+
6
+ from image_annotation.annotation_engine import ImageAnnotationEngine
7
+
8
+ from schemas.annotation import (
9
+ AnnotationResult,
10
+ AnnotationGraph,
11
+ PrimitiveEntity,
12
+ SemanticLabel,
13
+ Relation,
14
+ Contradiction,
15
+ Claim
16
+ )
17
+
18
+ __all__ = [
19
+ "ImageAnnotationEngine",
20
+ "AnnotationResult",
21
+ "AnnotationGraph",
22
+ "PrimitiveEntity",
23
+ "SemanticLabel",
24
+ "Relation",
25
+ "Contradiction",
26
+ "Claim"
27
+ ]
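
For orientation, below is a minimal usage sketch of the public API that image_annotation/__init__.py exposes, following the calls exercised in examples/image_annotation_quickstart.py above. The image path is a placeholder, and the output shapes (an "entities" list in the "json" output, an "answer" key in query results) are assumed from that example code rather than from separate engine documentation.

    # Hypothetical sketch; "sample.png" is a placeholder path, not a file shipped in the wheel.
    from image_annotation import ImageAnnotationEngine

    engine = ImageAnnotationEngine()

    # Structured annotation: the quickstart treats the "json" output as a dict with an "entities" list.
    data = engine.annotate("sample.png", output="json")
    print(f"entities: {len(data.get('entities', []))}")

    # Natural-language query: the quickstart reads an "answer" key from the result.
    result = engine.query("sample.png", "find all circles")
    print(result["answer"])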