crca 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (306)
  1. CRCA.py +172 -7
  2. MODEL_CARD.md +53 -0
  3. PKG-INFO +8 -2
  4. RELEASE_NOTES.md +17 -0
  5. STABILITY.md +19 -0
  6. architecture/hybrid/consistency_engine.py +362 -0
  7. architecture/hybrid/conversation_manager.py +421 -0
  8. architecture/hybrid/explanation_generator.py +452 -0
  9. architecture/hybrid/few_shot_learner.py +533 -0
  10. architecture/hybrid/graph_compressor.py +286 -0
  11. architecture/hybrid/hybrid_agent.py +4398 -0
  12. architecture/hybrid/language_compiler.py +623 -0
  13. architecture/hybrid/main,py +0 -0
  14. architecture/hybrid/reasoning_tracker.py +322 -0
  15. architecture/hybrid/self_verifier.py +524 -0
  16. architecture/hybrid/task_decomposer.py +567 -0
  17. architecture/hybrid/text_corrector.py +341 -0
  18. benchmark_results/crca_core_benchmarks.json +178 -0
  19. branches/crca_sd/crca_sd_realtime.py +6 -2
  20. branches/general_agent/__init__.py +102 -0
  21. branches/general_agent/general_agent.py +1400 -0
  22. branches/general_agent/personality.py +169 -0
  23. branches/general_agent/utils/__init__.py +19 -0
  24. branches/general_agent/utils/prompt_builder.py +170 -0
  25. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/METADATA +8 -2
  26. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/RECORD +303 -20
  27. crca_core/__init__.py +35 -0
  28. crca_core/benchmarks/__init__.py +14 -0
  29. crca_core/benchmarks/synthetic_scm.py +103 -0
  30. crca_core/core/__init__.py +23 -0
  31. crca_core/core/api.py +120 -0
  32. crca_core/core/estimate.py +208 -0
  33. crca_core/core/godclass.py +72 -0
  34. crca_core/core/intervention_design.py +174 -0
  35. crca_core/core/lifecycle.py +48 -0
  36. crca_core/discovery/__init__.py +9 -0
  37. crca_core/discovery/tabular.py +193 -0
  38. crca_core/identify/__init__.py +171 -0
  39. crca_core/identify/backdoor.py +39 -0
  40. crca_core/identify/frontdoor.py +48 -0
  41. crca_core/identify/graph.py +106 -0
  42. crca_core/identify/id_algorithm.py +43 -0
  43. crca_core/identify/iv.py +48 -0
  44. crca_core/models/__init__.py +67 -0
  45. crca_core/models/provenance.py +56 -0
  46. crca_core/models/refusal.py +39 -0
  47. crca_core/models/result.py +83 -0
  48. crca_core/models/spec.py +151 -0
  49. crca_core/models/validation.py +68 -0
  50. crca_core/scm/__init__.py +9 -0
  51. crca_core/scm/linear_gaussian.py +198 -0
  52. crca_core/timeseries/__init__.py +6 -0
  53. crca_core/timeseries/pcmci.py +181 -0
  54. crca_llm/__init__.py +12 -0
  55. crca_llm/client.py +85 -0
  56. crca_llm/coauthor.py +118 -0
  57. crca_llm/orchestrator.py +289 -0
  58. crca_llm/types.py +21 -0
  59. crca_reasoning/__init__.py +16 -0
  60. crca_reasoning/critique.py +54 -0
  61. crca_reasoning/godclass.py +206 -0
  62. crca_reasoning/memory.py +24 -0
  63. crca_reasoning/rationale.py +10 -0
  64. crca_reasoning/react_controller.py +81 -0
  65. crca_reasoning/tool_router.py +97 -0
  66. crca_reasoning/types.py +40 -0
  67. crca_sd/__init__.py +15 -0
  68. crca_sd/crca_sd_core.py +2 -0
  69. crca_sd/crca_sd_governance.py +2 -0
  70. crca_sd/crca_sd_mpc.py +2 -0
  71. crca_sd/crca_sd_realtime.py +2 -0
  72. crca_sd/crca_sd_tui.py +2 -0
  73. cuda-keyring_1.1-1_all.deb +0 -0
  74. cuda-keyring_1.1-1_all.deb.1 +0 -0
  75. docs/IMAGE_ANNOTATION_USAGE.md +539 -0
  76. docs/INSTALL_DEEPSPEED.md +125 -0
  77. docs/api/branches/crca-cg.md +19 -0
  78. docs/api/branches/crca-q.md +27 -0
  79. docs/api/branches/crca-sd.md +37 -0
  80. docs/api/branches/general-agent.md +24 -0
  81. docs/api/branches/overview.md +19 -0
  82. docs/api/crca/agent-methods.md +62 -0
  83. docs/api/crca/operations.md +79 -0
  84. docs/api/crca/overview.md +32 -0
  85. docs/api/image-annotation/engine.md +52 -0
  86. docs/api/image-annotation/overview.md +17 -0
  87. docs/api/schemas/annotation.md +34 -0
  88. docs/api/schemas/core-schemas.md +82 -0
  89. docs/api/schemas/overview.md +32 -0
  90. docs/api/schemas/policy.md +30 -0
  91. docs/api/utils/conversation.md +22 -0
  92. docs/api/utils/graph-reasoner.md +32 -0
  93. docs/api/utils/overview.md +21 -0
  94. docs/api/utils/router.md +19 -0
  95. docs/api/utils/utilities.md +97 -0
  96. docs/architecture/causal-graphs.md +41 -0
  97. docs/architecture/data-flow.md +29 -0
  98. docs/architecture/design-principles.md +33 -0
  99. docs/architecture/hybrid-agent/components.md +38 -0
  100. docs/architecture/hybrid-agent/consistency.md +26 -0
  101. docs/architecture/hybrid-agent/overview.md +44 -0
  102. docs/architecture/hybrid-agent/reasoning.md +22 -0
  103. docs/architecture/llm-integration.md +26 -0
  104. docs/architecture/modular-structure.md +37 -0
  105. docs/architecture/overview.md +69 -0
  106. docs/architecture/policy-engine-arch.md +29 -0
  107. docs/branches/crca-cg/corposwarm.md +39 -0
  108. docs/branches/crca-cg/esg-scoring.md +30 -0
  109. docs/branches/crca-cg/multi-agent.md +35 -0
  110. docs/branches/crca-cg/overview.md +40 -0
  111. docs/branches/crca-q/alternative-data.md +55 -0
  112. docs/branches/crca-q/architecture.md +71 -0
  113. docs/branches/crca-q/backtesting.md +45 -0
  114. docs/branches/crca-q/causal-engine.md +33 -0
  115. docs/branches/crca-q/execution.md +39 -0
  116. docs/branches/crca-q/market-data.md +60 -0
  117. docs/branches/crca-q/overview.md +58 -0
  118. docs/branches/crca-q/philosophy.md +60 -0
  119. docs/branches/crca-q/portfolio-optimization.md +66 -0
  120. docs/branches/crca-q/risk-management.md +102 -0
  121. docs/branches/crca-q/setup.md +65 -0
  122. docs/branches/crca-q/signal-generation.md +61 -0
  123. docs/branches/crca-q/signal-validation.md +43 -0
  124. docs/branches/crca-sd/core.md +84 -0
  125. docs/branches/crca-sd/governance.md +53 -0
  126. docs/branches/crca-sd/mpc-solver.md +65 -0
  127. docs/branches/crca-sd/overview.md +59 -0
  128. docs/branches/crca-sd/realtime.md +28 -0
  129. docs/branches/crca-sd/tui.md +20 -0
  130. docs/branches/general-agent/overview.md +37 -0
  131. docs/branches/general-agent/personality.md +36 -0
  132. docs/branches/general-agent/prompt-builder.md +30 -0
  133. docs/changelog/index.md +79 -0
  134. docs/contributing/code-style.md +69 -0
  135. docs/contributing/documentation.md +43 -0
  136. docs/contributing/overview.md +29 -0
  137. docs/contributing/testing.md +29 -0
  138. docs/core/crcagent/async-operations.md +65 -0
  139. docs/core/crcagent/automatic-extraction.md +107 -0
  140. docs/core/crcagent/batch-prediction.md +80 -0
  141. docs/core/crcagent/bayesian-inference.md +60 -0
  142. docs/core/crcagent/causal-graph.md +92 -0
  143. docs/core/crcagent/counterfactuals.md +96 -0
  144. docs/core/crcagent/deterministic-simulation.md +78 -0
  145. docs/core/crcagent/dual-mode-operation.md +82 -0
  146. docs/core/crcagent/initialization.md +88 -0
  147. docs/core/crcagent/optimization.md +65 -0
  148. docs/core/crcagent/overview.md +63 -0
  149. docs/core/crcagent/time-series.md +57 -0
  150. docs/core/schemas/annotation.md +30 -0
  151. docs/core/schemas/core-schemas.md +82 -0
  152. docs/core/schemas/overview.md +30 -0
  153. docs/core/schemas/policy.md +41 -0
  154. docs/core/templates/base-agent.md +31 -0
  155. docs/core/templates/feature-mixins.md +31 -0
  156. docs/core/templates/overview.md +29 -0
  157. docs/core/templates/templates-guide.md +75 -0
  158. docs/core/tools/mcp-client.md +34 -0
  159. docs/core/tools/overview.md +24 -0
  160. docs/core/utils/conversation.md +27 -0
  161. docs/core/utils/graph-reasoner.md +29 -0
  162. docs/core/utils/overview.md +27 -0
  163. docs/core/utils/router.md +27 -0
  164. docs/core/utils/utilities.md +97 -0
  165. docs/css/custom.css +84 -0
  166. docs/examples/basic-usage.md +57 -0
  167. docs/examples/general-agent/general-agent-examples.md +50 -0
  168. docs/examples/hybrid-agent/hybrid-agent-examples.md +56 -0
  169. docs/examples/image-annotation/image-annotation-examples.md +54 -0
  170. docs/examples/integration/integration-examples.md +58 -0
  171. docs/examples/overview.md +37 -0
  172. docs/examples/trading/trading-examples.md +46 -0
  173. docs/features/causal-reasoning/advanced-topics.md +101 -0
  174. docs/features/causal-reasoning/counterfactuals.md +43 -0
  175. docs/features/causal-reasoning/do-calculus.md +50 -0
  176. docs/features/causal-reasoning/overview.md +47 -0
  177. docs/features/causal-reasoning/structural-models.md +52 -0
  178. docs/features/hybrid-agent/advanced-components.md +55 -0
  179. docs/features/hybrid-agent/core-components.md +64 -0
  180. docs/features/hybrid-agent/overview.md +34 -0
  181. docs/features/image-annotation/engine.md +82 -0
  182. docs/features/image-annotation/features.md +113 -0
  183. docs/features/image-annotation/integration.md +75 -0
  184. docs/features/image-annotation/overview.md +53 -0
  185. docs/features/image-annotation/quickstart.md +73 -0
  186. docs/features/policy-engine/doctrine-ledger.md +105 -0
  187. docs/features/policy-engine/monitoring.md +44 -0
  188. docs/features/policy-engine/mpc-control.md +89 -0
  189. docs/features/policy-engine/overview.md +46 -0
  190. docs/getting-started/configuration.md +225 -0
  191. docs/getting-started/first-agent.md +164 -0
  192. docs/getting-started/installation.md +144 -0
  193. docs/getting-started/quickstart.md +137 -0
  194. docs/index.md +118 -0
  195. docs/js/mathjax.js +13 -0
  196. docs/lrm/discovery_proof_notes.md +25 -0
  197. docs/lrm/finetune_full.md +83 -0
  198. docs/lrm/math_appendix.md +120 -0
  199. docs/lrm/overview.md +32 -0
  200. docs/mkdocs.yml +238 -0
  201. docs/stylesheets/extra.css +21 -0
  202. docs_generated/crca_core/CounterfactualResult.md +12 -0
  203. docs_generated/crca_core/DiscoveryHypothesisResult.md +13 -0
  204. docs_generated/crca_core/DraftSpec.md +13 -0
  205. docs_generated/crca_core/EstimateResult.md +13 -0
  206. docs_generated/crca_core/IdentificationResult.md +17 -0
  207. docs_generated/crca_core/InterventionDesignResult.md +12 -0
  208. docs_generated/crca_core/LockedSpec.md +15 -0
  209. docs_generated/crca_core/RefusalResult.md +12 -0
  210. docs_generated/crca_core/ValidationReport.md +9 -0
  211. docs_generated/crca_core/index.md +13 -0
  212. examples/general_agent_example.py +277 -0
  213. examples/general_agent_quickstart.py +202 -0
  214. examples/general_agent_simple.py +92 -0
  215. examples/hybrid_agent_auto_extraction.py +84 -0
  216. examples/hybrid_agent_dictionary_demo.py +104 -0
  217. examples/hybrid_agent_enhanced.py +179 -0
  218. examples/hybrid_agent_general_knowledge.py +107 -0
  219. examples/image_annotation_quickstart.py +328 -0
  220. examples/test_hybrid_fixes.py +77 -0
  221. image_annotation/__init__.py +27 -0
  222. image_annotation/annotation_engine.py +2593 -0
  223. install_cuda_wsl2.sh +59 -0
  224. install_deepspeed.sh +56 -0
  225. install_deepspeed_simple.sh +87 -0
  226. mkdocs.yml +252 -0
  227. ollama/Modelfile +8 -0
  228. prompts/__init__.py +2 -1
  229. prompts/default_crca.py +9 -1
  230. prompts/general_agent.py +227 -0
  231. prompts/image_annotation.py +56 -0
  232. pyproject.toml +17 -2
  233. requirements-docs.txt +10 -0
  234. requirements.txt +21 -2
  235. schemas/__init__.py +26 -1
  236. schemas/annotation.py +222 -0
  237. schemas/conversation.py +193 -0
  238. schemas/hybrid.py +211 -0
  239. schemas/reasoning.py +276 -0
  240. schemas_export/crca_core/CounterfactualResult.schema.json +108 -0
  241. schemas_export/crca_core/DiscoveryHypothesisResult.schema.json +113 -0
  242. schemas_export/crca_core/DraftSpec.schema.json +635 -0
  243. schemas_export/crca_core/EstimateResult.schema.json +113 -0
  244. schemas_export/crca_core/IdentificationResult.schema.json +145 -0
  245. schemas_export/crca_core/InterventionDesignResult.schema.json +111 -0
  246. schemas_export/crca_core/LockedSpec.schema.json +646 -0
  247. schemas_export/crca_core/RefusalResult.schema.json +90 -0
  248. schemas_export/crca_core/ValidationReport.schema.json +62 -0
  249. scripts/build_lrm_dataset.py +80 -0
  250. scripts/export_crca_core_schemas.py +54 -0
  251. scripts/export_hf_lrm.py +37 -0
  252. scripts/export_ollama_gguf.py +45 -0
  253. scripts/generate_changelog.py +157 -0
  254. scripts/generate_crca_core_docs_from_schemas.py +86 -0
  255. scripts/run_crca_core_benchmarks.py +163 -0
  256. scripts/run_full_finetune.py +198 -0
  257. scripts/run_lrm_eval.py +31 -0
  258. templates/graph_management.py +29 -0
  259. tests/conftest.py +9 -0
  260. tests/test_core.py +2 -3
  261. tests/test_crca_core_discovery_tabular.py +15 -0
  262. tests/test_crca_core_estimate_dowhy.py +36 -0
  263. tests/test_crca_core_identify.py +18 -0
  264. tests/test_crca_core_intervention_design.py +36 -0
  265. tests/test_crca_core_linear_gaussian_scm.py +69 -0
  266. tests/test_crca_core_spec.py +25 -0
  267. tests/test_crca_core_timeseries_pcmci.py +15 -0
  268. tests/test_crca_llm_coauthor.py +12 -0
  269. tests/test_crca_llm_orchestrator.py +80 -0
  270. tests/test_hybrid_agent_llm_enhanced.py +556 -0
  271. tests/test_image_annotation_demo.py +376 -0
  272. tests/test_image_annotation_operational.py +408 -0
  273. tests/test_image_annotation_unit.py +551 -0
  274. tests/test_training_moe.py +13 -0
  275. training/__init__.py +42 -0
  276. training/datasets.py +140 -0
  277. training/deepspeed_zero2_0_5b.json +22 -0
  278. training/deepspeed_zero2_1_5b.json +22 -0
  279. training/deepspeed_zero3_0_5b.json +28 -0
  280. training/deepspeed_zero3_14b.json +28 -0
  281. training/deepspeed_zero3_h100_3gpu.json +20 -0
  282. training/deepspeed_zero3_offload.json +28 -0
  283. training/eval.py +92 -0
  284. training/finetune.py +516 -0
  285. training/public_datasets.py +89 -0
  286. training_data/react_train.jsonl +7473 -0
  287. utils/agent_discovery.py +311 -0
  288. utils/batch_processor.py +317 -0
  289. utils/conversation.py +78 -0
  290. utils/edit_distance.py +118 -0
  291. utils/formatter.py +33 -0
  292. utils/graph_reasoner.py +530 -0
  293. utils/rate_limiter.py +283 -0
  294. utils/router.py +2 -2
  295. utils/tool_discovery.py +307 -0
  296. webui/__init__.py +10 -0
  297. webui/app.py +229 -0
  298. webui/config.py +104 -0
  299. webui/static/css/style.css +332 -0
  300. webui/static/js/main.js +284 -0
  301. webui/templates/index.html +42 -0
  302. tests/test_crca_excel.py +0 -166
  303. tests/test_data_broker.py +0 -424
  304. tests/test_palantir.py +0 -349
  305. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/WHEEL +0 -0
  306. {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/licenses/LICENSE +0 -0
docs/IMAGE_ANNOTATION_USAGE.md
@@ -0,0 +1,539 @@
# Image Annotation Usage Guide

This guide explains how to use the image annotation system in CR-CA. The image annotation engine provides automated image analysis with geometric primitive extraction, semantic labeling, and query capabilities.

## Table of Contents

1. [Quick Start](#quick-start)
2. [Direct Usage with ImageAnnotationEngine](#direct-usage-with-imageannotationengine)
3. [Integration with CRCAAgent](#integration-with-crcaagent)
4. [Integration with GeneralAgent](#integration-with-generalagent)
5. [Main Methods](#main-methods)
6. [Output Formats](#output-formats)
7. [Advanced Features](#advanced-features)
8. [Examples](#examples)
9. [Troubleshooting](#troubleshooting)
10. [API Reference](#api-reference)

## Quick Start

### Basic Annotation

```python
from image_annotation import ImageAnnotationEngine

# Initialize the engine
engine = ImageAnnotationEngine()

# Annotate an image (file path, URL, numpy array, or PIL Image)
result = engine.annotate("path/to/image.png", output="all")

# Access the results
print(f"Found {len(result.annotation_graph.entities)} entities")
print(f"Generated {len(result.annotation_graph.labels)} labels")
print(result.formal_report)
```

### With CRCAAgent

```python
from CRCA import CRCAAgent

# Create agent with image annotation enabled
agent = CRCAAgent(
    model_name="gpt-4o-mini",
    use_image_annotation=True
)

# The agent now has access to annotate_image and query_image tools
response = agent.run("Analyze the image at path/to/image.png and identify all objects")
```

## Direct Usage with ImageAnnotationEngine

### Initialization

```python
from image_annotation import ImageAnnotationEngine
from image_annotation.annotation_engine import AnnotationConfig

# Basic initialization with defaults
engine = ImageAnnotationEngine()

# With custom configuration
config = AnnotationConfig(
    gpt_model="gpt-4o-mini",
    enable_temporal_tracking=True,
    auto_detect_type=True,
    output_format="all"
)
engine = ImageAnnotationEngine(config=config)

# With individual parameters
engine = ImageAnnotationEngine(
    gpt_model="gpt-4o-mini",
    enable_temporal_tracking=False,
    use_crca_tools=True,
    cache_enabled=True,
    auto_retry=True
)
```

### Supported Input Types

The `annotate()` method accepts multiple input types:

```python
from pathlib import Path

# File path (string or Path)
result = engine.annotate("image.png")
result = engine.annotate(Path("image.png"))

# URL
result = engine.annotate("https://example.com/image.png")

# NumPy array
import numpy as np
import cv2
img = cv2.imread("image.png")
result = engine.annotate(img)

# PIL Image
from PIL import Image
img = Image.open("image.png")
result = engine.annotate(img)

# Batch processing (a list of any of the above)
results = engine.annotate(["img1.png", "img2.png", "img3.png"])
```

## Integration with CRCAAgent

### Basic Setup

```python
from CRCA import CRCAAgent

agent = CRCAAgent(
    model_name="gpt-4o-mini",
    use_image_annotation=True,  # Enable image annotation tools
    use_crca_tools=True
)
```

### Available Tools

When `use_image_annotation=True`, the agent automatically gets two tools:

1. **`annotate_image`**: Annotate an image and get structured results
2. **`query_image`**: Query an annotated image with natural language

### Using the Tools

```python
# The agent can use these tools automatically in conversations
task = """
Analyze the image at path/to/circuit.png:
1. Identify all components
2. Measure distances between components
3. Find the largest component
"""

response = agent.run(task)
```

### Manual Tool Usage

```python
# Get the tool handlers directly
annotate_tool = agent.tools_list_dictionary.get("annotate_image")
query_tool = agent.tools_list_dictionary.get("query_image")

# Use annotate_image
result = annotate_tool(
    image_path="path/to/image.png",
    output_format="all",
    frame_id=None
)

# Use query_image
answer = query_tool(
    image_path="path/to/image.png",
    query="What is the largest object in this image?",
    frame_id=None
)
```

## Integration with GeneralAgent

### Basic Setup

```python
from branches.general_agent.general_agent import GeneralAgent

agent = GeneralAgent(
    model_name="gpt-4o-mini",
    enable_multimodal=True  # Enables image annotation tools
)
```

The GeneralAgent automatically includes image annotation tools when `enable_multimodal=True`.

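As a minimal usage sketch (assuming `GeneralAgent.run` accepts a plain task string, mirroring `CRCAAgent.run` above; the prompt and image path are illustrative):

```python
# Hedged example: GeneralAgent.run is assumed to take a task string like
# CRCAAgent.run does; "photo.png" is a placeholder path.
response = agent.run("Annotate photo.png and list every object you find")
print(response)
```
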
## Main Methods

### 1. `annotate()` - Main Annotation Method

```python
def annotate(
    self,
    input: Union[str, np.ndarray, Image.Image, List, Path],
    frame_id: Optional[int] = None,
    output: Optional[str] = None
) -> Union[AnnotationResult, np.ndarray, Dict[str, Any], str, List]:
    """
    Annotate image(s) with full automation.

    Args:
        input: Image input (file path, URL, numpy array, PIL Image, or list for batch)
        frame_id: Optional frame ID for temporal tracking
        output: Output format - "overlay", "json", "report", or "all"

    Returns:
        Depends on the output format:
        - "overlay": numpy array (annotated image)
        - "json": dict (JSON data)
        - "report": str (formal report)
        - "all": AnnotationResult object
        - If input is a list: a list of the above
    """
```

**Examples:**

```python
# Get annotated overlay image
overlay = engine.annotate("image.png", output="overlay")
cv2.imwrite("annotated.png", overlay)

# Get JSON data
json_data = engine.annotate("image.png", output="json")
print(json_data["entities"])

# Get formal report
report = engine.annotate("image.png", output="report")
print(report)

# Get complete result
result = engine.annotate("image.png", output="all")
print(result.annotation_graph.entities)
print(result.formal_report)
print(result.processing_time)
```

### 2. `query()` - Natural Language Querying

```python
def query(
    self,
    input: Union[str, np.ndarray, Image.Image, Path],
    query: str,
    frame_id: Optional[int] = None
) -> Dict[str, Any]:
    """
    Query an image with natural language.

    Args:
        input: Image input (file path, URL, numpy array, or PIL Image)
        query: Natural language query (e.g., "find all circles", "measure distance")
        frame_id: Optional frame ID for temporal tracking

    Returns:
        Dictionary with:
        - "answer": Natural language answer
        - "entities": List of relevant entities
        - "measurements": Dict of measurements (if requested)
        - "graph": AnnotationGraph (if needed)
    """
```

**Examples:**

```python
# Find specific objects
result = engine.query("image.png", "find all circles")
print(result["answer"])
print(result["entities"])

# Measure distances
result = engine.query("image.png", "measure the distance from the border to the largest city")
print(result["measurements"])

# Identify objects
result = engine.query("image.png", "identify all military installations")
print(result["answer"])

# Count objects
result = engine.query("image.png", "how many lines are in this image?")
print(result["answer"])
```

## Output Formats

### AnnotationResult Object

When using `output="all"`, you get a complete `AnnotationResult` object:

```python
result = engine.annotate("image.png", output="all")

# Access the annotation graph
graph = result.annotation_graph
entities = graph.entities              # List[PrimitiveEntity]
labels = graph.labels                  # List[SemanticLabel]
relations = graph.relations            # List[Relation]
contradictions = graph.contradictions  # List[Contradiction]

# Access the overlay image (stored as bytes; decode before use)
if result.overlay_image:
    overlay = cv2.imdecode(
        np.frombuffer(result.overlay_image, np.uint8),
        cv2.IMREAD_COLOR
    )

# Access the formal report
report = result.formal_report

# Access the JSON output
json_data = result.json_output

# Access metadata
processing_time = result.processing_time
instability_detected = result.instability_detected
```

### AnnotationGraph Structure

```python
graph = result.annotation_graph

# Get an entity by ID
entity = graph.get_entity_by_id("entity-id-123")

# Get labels for an entity
labels = graph.get_labels_for_entity("entity-id-123")

# Get relations for an entity
relations = graph.get_relations_for_entity("entity-id-123")
```

## Advanced Features

### Temporal Tracking

For video sequences or time-series images:

```python
engine = ImageAnnotationEngine(enable_temporal_tracking=True)

# Process a frame sequence
for frame_id, image_path in enumerate(frame_paths):
    result = engine.annotate(image_path, frame_id=frame_id)
    # The engine tracks entities across frames using Kalman filters
```

### Batch Processing

```python
# Process multiple images in parallel
image_paths = ["img1.png", "img2.png", "img3.png"]
results = engine.annotate(image_paths, output="all")

# Results is a list of AnnotationResult objects
for i, result in enumerate(results):
    print(f"Image {i}: {len(result.annotation_graph.entities)} entities")
```

### Custom Configuration

```python
from image_annotation.annotation_engine import AnnotationConfig

config = AnnotationConfig(
    gpt_model="gpt-4o-mini",
    enable_temporal_tracking=True,
    auto_detect_type=True,
    auto_tune_params=True,
    cache_enabled=True,
    auto_retry=True,
    max_retries=3,
    output_format="all",
    parallel_workers=4,
    show_progress=True
)

engine = ImageAnnotationEngine(config=config)
```

### Caching

The engine supports automatic caching to avoid re-processing the same images:

```python
engine = ImageAnnotationEngine(cache_enabled=True)
# First call processes the image
result1 = engine.annotate("image.png")
# Second call uses the cache (much faster)
result2 = engine.annotate("image.png")
```

## Examples

### Example 1: Circuit Diagram Analysis

```python
from image_annotation import ImageAnnotationEngine

engine = ImageAnnotationEngine()

# Annotate a circuit diagram
result = engine.annotate("circuit.png", output="all")

# Find all components
components = [e for e in result.annotation_graph.entities
              if e.primitive_type in ["circle", "contour"]]

print(f"Found {len(components)} components")

# Get labels
for label in result.annotation_graph.labels:
    print(f"Entity {label.entity_id}: {label.label} (uncertainty: {label.uncertainty})")
```

### Example 2: Tactical Map Analysis

```python
import math

engine = ImageAnnotationEngine()

# Query for specific information
result = engine.query(
    "tactical_map.png",
    "identify all military bases and measure their sizes"
)

print(result["answer"])
for entity in result["entities"]:
    if entity.primitive_type == "circle":
        radius = entity.metadata.get("radius", 0)
        area = math.pi * radius ** 2
        print(f"Base at {entity.pixel_coords[0]}: area = {area:.2f} pixels²")
```

### Example 3: Integration with CR-CA for Strategic Analysis

```python
from CRCA import CRCAAgent

# Create agent with image annotation
agent = CRCAAgent(
    model_name="gpt-4o-mini",
    use_image_annotation=True,
    use_crca_tools=True
)

# Complex task combining image analysis and causal reasoning
task = """
Analyze the tactical map at path/to/map.png:
1. Use query_image to identify all military installations
2. Use query_image to measure distances between key targets
3. Extract causal variables:
   - Distance from border to capital
   - Number of military bases
   - Road network connectivity
4. Perform causal analysis:
   - What factors affect invasion success?
   - What are the critical chokepoints?
5. Provide strategic recommendations
"""

response = agent.run(task)
print(response)
```

### Example 4: Batch Processing with Progress

```python
from pathlib import Path

engine = ImageAnnotationEngine(
    cache_enabled=True,
    show_progress=True  # Requires tqdm
)

# Get all images in a directory
image_dir = Path("images")
image_paths = list(image_dir.glob("*.png"))

# Process all images
results = engine.annotate(image_paths, output="all")

# Analyze the results
total_entities = sum(len(r.annotation_graph.entities) for r in results)
print(f"Total entities across all images: {total_entities}")
```

### Example 5: Custom Query Processing

```python
engine = ImageAnnotationEngine()

# Multiple queries on the same image
queries = [
    "find all circles",
    "identify the largest structure",
    "measure distances between all circles",
    "count the number of lines"
]

for query_text in queries:
    result = engine.query("image.png", query_text)
    print(f"\nQuery: {query_text}")
    print(f"Answer: {result['answer']}")
    if result.get("measurements"):
        print(f"Measurements: {result['measurements']}")
```

## Troubleshooting

### Common Issues

1. **Import Error**: Make sure all dependencies are installed:

   ```bash
   pip install opencv-python numpy pillow loguru rustworkx
   ```

2. **GPT API Error**: Check your OpenAI API key:

   ```python
   import os
   os.environ["OPENAI_API_KEY"] = "your-key-here"
   ```

3. **Memory Issues with Large Images**: The engine automatically downscales, but you can pre-process:

   ```python
   import cv2
   img = cv2.imread("large_image.png")
   img = cv2.resize(img, (1920, 1080))  # Resize before annotation
   result = engine.annotate(img)
   ```

4. **Slow Processing**: Enable caching and use batch processing with parallel workers:

   ```python
   engine = ImageAnnotationEngine(
       cache_enabled=True,
       parallel_workers=4  # Adjust based on CPU cores
   )
   ```

## API Reference

For complete API documentation, see:
- `image_annotation/annotation_engine.py` - Main engine class
- `schemas/annotation.py` - Data models
- `tests/test_image_annotation_*.py` - Test examples
docs/INSTALL_DEEPSPEED.md
@@ -0,0 +1,125 @@
# Installing DeepSpeed on WSL2 Ubuntu 24.04

## Prerequisites

- Python 3.12+ (already installed)
- PyTorch with CUDA support (already installed: PyTorch 2.10.0+cu128)
- CUDA 12.8+ (already available)

## Installation Steps

### Option 1: Using the Installation Script (Recommended)

```bash
# Navigate to the project directory
cd /mnt/c/Users/ilum/Documents/Work/agents/CR-CA

# Run the installation script
sudo bash install_deepspeed.sh
```

### Option 2: Manual Installation

```bash
# 1. Update package lists
sudo apt-get update

# 2. Install build dependencies
sudo apt-get install -y build-essential python3-dev

# 3. Upgrade pip
python3 -m pip install --upgrade pip setuptools wheel

# 4. Install DeepSpeed
python3 -m pip install deepspeed --upgrade

# 5. Verify the installation
python3 -c "import deepspeed; print('DeepSpeed version:', deepspeed.__version__)"
```

### Option 3: Install with a Specific CUDA Version

If you need to match your PyTorch CUDA version (12.8):

```bash
# Install DeepSpeed with CUDA 12.x support
DS_BUILD_OPS=0 python3 -m pip install deepspeed --upgrade
```

The `DS_BUILD_OPS=0` flag prevents DeepSpeed from trying to build custom CUDA operations, which can be problematic in WSL2.

## Troubleshooting

### Issue: "No package metadata was found for deepspeed"

This means DeepSpeed isn't installed. Run the installation steps above.

### Issue: Build errors during installation

If you encounter build errors:

```bash
# Disable CUDA operation building (uses PyTorch's CUDA ops instead)
export DS_BUILD_OPS=0
python3 -m pip install deepspeed --upgrade
```

### Issue: CUDA not detected

Make sure:
1. NVIDIA drivers are installed in Windows
2. WSL2 has access to the GPU (check with `nvidia-smi` in WSL2)
3. PyTorch can see CUDA: `python3 -c "import torch; print(torch.cuda.is_available())"`

### Issue: Permission errors

Use `python3 -m pip` instead of `pip3`, or install in a virtual environment:

```bash
python3 -m venv venv
source venv/bin/activate
python3 -m pip install deepspeed --upgrade
```

## Verification

After installation, verify that DeepSpeed works:

```bash
python3 -c "import deepspeed; print('DeepSpeed version:', deepspeed.__version__)"
```

You should see output like: `DeepSpeed version: 0.x.x`

## Using DeepSpeed in Your Code

Once installed, your training script should work. The code in `training/finetune.py` will automatically use DeepSpeed if:
1. DeepSpeed is installed
2. A DeepSpeed config file is provided (e.g., `training/deepspeed_zero3_offload.json`); see the sketch below

+ ## MoE (Switch/Flan-MoE) Training
101
+
102
+ Switch/Flan-MoE models are **Seq2Seq (encoder-decoder)**. The training pipeline now supports a dual path:
103
+ - **Causal LM** (Qwen/Qwen2.5 and similar)
104
+ - **Seq2Seq** (Switch/Flan-MoE)
105
+
106
+ Example command for Switch MoE:
107
+
108
+ ```bash
109
+ deepspeed --num_gpus=3 scripts/run_full_finetune.py \
110
+ --model-size 1.5b \
111
+ --model-id google/switch-base-8 \
112
+ --train-file training_data/react_train.jsonl \
113
+ --output-dir lrm_switch_base_8_full_finetune \
114
+ --deepspeed-config training/deepspeed_zero3_h100_3gpu.json
115
+ ```
116
+
117
+ ## CRCA MoE Model Selection
118
+
119
+ To have CRCA use a MoE model for LLM orchestration, set:
120
+
121
+ ```bash
122
+ export CRCA_MOE_MODEL=google/switch-base-8
123
+ ```
124
+
125
+ If `CRCA_MOE_MODEL` is set, it overrides the default LLM model. You can also set `CRCA_LLM_MODEL` for a non-MoE override.
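
A minimal sketch of the resolution order this implies; the precedence of `CRCA_LLM_MODEL` over the built-in default, and the default model name itself, are assumptions:

```python
import os

# Hedged sketch of model selection: CRCA_MOE_MODEL wins, then CRCA_LLM_MODEL,
# then a built-in default (placeholder name, not necessarily CRCA's actual default).
DEFAULT_MODEL = "gpt-4o-mini"

model = (
    os.environ.get("CRCA_MOE_MODEL")
    or os.environ.get("CRCA_LLM_MODEL")
    or DEFAULT_MODEL
)
print(f"Orchestrator will use: {model}")
```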
docs/api/branches/crca-cg.md
@@ -0,0 +1,19 @@
# CRCA-CG API

API reference for CRCA-CG corporate governance.

## Classes

### CorporateSwarm

Multi-agent system for corporate governance.

```python
from typing import Any, Dict

class CorporateSwarm:
    def make_decision(self, problem: Dict[str, Any]) -> Dict[str, Any]:
        """Make a governance decision."""
```

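A hypothetical usage sketch; the constructor arguments and the problem-dict keys are illustrative assumptions, not a documented schema:

```python
# Hedged example: the shape of `problem` is assumed for illustration only.
swarm = CorporateSwarm()
decision = swarm.make_decision({
    "topic": "capital_allocation",
    "options": ["buyback", "dividend", "reinvest"],
})
print(decision)
```
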
## Next Steps

- [General Agent](general-agent.md) - General agent API
docs/api/branches/crca-q.md
@@ -0,0 +1,27 @@
# CRCA-Q API

API reference for CRCA-Q quantitative trading.

## Classes

### QuantTradingAgent

Main trading agent.

```python
from typing import Any, Dict

class QuantTradingAgent:
    def run(self, asset: str) -> Dict[str, Any]:
        """Run trading analysis for an asset."""
```

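A hypothetical usage sketch; the ticker and the shape of the returned dictionary are illustrative assumptions:

```python
# Hedged example: constructor arguments and result keys are assumed.
agent = QuantTradingAgent()
analysis = agent.run("AAPL")
print(analysis)
```
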
## Mathematical Foundation

Trading decisions use:

$$\text{Signal} = f(\text{Predictions}, \text{Risk}, \text{Constraints})$$

where the predictions come from causal models.

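One concrete, purely illustrative instance of such an $f$: scale a causal return prediction by risk and clip to a position constraint. None of this is CRCA-Q's actual logic; the risk scaling and the symmetric position bound are assumptions:

```python
# Hedged sketch: a toy f(prediction, risk, constraint).
def signal(prediction: float, risk: float, max_position: float = 1.0) -> float:
    raw = prediction / max(risk, 1e-9)                  # risk-scaled expected return
    return max(-max_position, min(max_position, raw))   # enforce the position constraint

print(signal(prediction=0.02, risk=0.05))  # 0.4
```
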
## Next Steps

- [CRCA-SD](crca-sd.md) - Socioeconomic dynamics API