crca 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CRCA.py +172 -7
- MODEL_CARD.md +53 -0
- PKG-INFO +8 -2
- RELEASE_NOTES.md +17 -0
- STABILITY.md +19 -0
- architecture/hybrid/consistency_engine.py +362 -0
- architecture/hybrid/conversation_manager.py +421 -0
- architecture/hybrid/explanation_generator.py +452 -0
- architecture/hybrid/few_shot_learner.py +533 -0
- architecture/hybrid/graph_compressor.py +286 -0
- architecture/hybrid/hybrid_agent.py +4398 -0
- architecture/hybrid/language_compiler.py +623 -0
- architecture/hybrid/main,py +0 -0
- architecture/hybrid/reasoning_tracker.py +322 -0
- architecture/hybrid/self_verifier.py +524 -0
- architecture/hybrid/task_decomposer.py +567 -0
- architecture/hybrid/text_corrector.py +341 -0
- benchmark_results/crca_core_benchmarks.json +178 -0
- branches/crca_sd/crca_sd_realtime.py +6 -2
- branches/general_agent/__init__.py +102 -0
- branches/general_agent/general_agent.py +1400 -0
- branches/general_agent/personality.py +169 -0
- branches/general_agent/utils/__init__.py +19 -0
- branches/general_agent/utils/prompt_builder.py +170 -0
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/METADATA +8 -2
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/RECORD +303 -20
- crca_core/__init__.py +35 -0
- crca_core/benchmarks/__init__.py +14 -0
- crca_core/benchmarks/synthetic_scm.py +103 -0
- crca_core/core/__init__.py +23 -0
- crca_core/core/api.py +120 -0
- crca_core/core/estimate.py +208 -0
- crca_core/core/godclass.py +72 -0
- crca_core/core/intervention_design.py +174 -0
- crca_core/core/lifecycle.py +48 -0
- crca_core/discovery/__init__.py +9 -0
- crca_core/discovery/tabular.py +193 -0
- crca_core/identify/__init__.py +171 -0
- crca_core/identify/backdoor.py +39 -0
- crca_core/identify/frontdoor.py +48 -0
- crca_core/identify/graph.py +106 -0
- crca_core/identify/id_algorithm.py +43 -0
- crca_core/identify/iv.py +48 -0
- crca_core/models/__init__.py +67 -0
- crca_core/models/provenance.py +56 -0
- crca_core/models/refusal.py +39 -0
- crca_core/models/result.py +83 -0
- crca_core/models/spec.py +151 -0
- crca_core/models/validation.py +68 -0
- crca_core/scm/__init__.py +9 -0
- crca_core/scm/linear_gaussian.py +198 -0
- crca_core/timeseries/__init__.py +6 -0
- crca_core/timeseries/pcmci.py +181 -0
- crca_llm/__init__.py +12 -0
- crca_llm/client.py +85 -0
- crca_llm/coauthor.py +118 -0
- crca_llm/orchestrator.py +289 -0
- crca_llm/types.py +21 -0
- crca_reasoning/__init__.py +16 -0
- crca_reasoning/critique.py +54 -0
- crca_reasoning/godclass.py +206 -0
- crca_reasoning/memory.py +24 -0
- crca_reasoning/rationale.py +10 -0
- crca_reasoning/react_controller.py +81 -0
- crca_reasoning/tool_router.py +97 -0
- crca_reasoning/types.py +40 -0
- crca_sd/__init__.py +15 -0
- crca_sd/crca_sd_core.py +2 -0
- crca_sd/crca_sd_governance.py +2 -0
- crca_sd/crca_sd_mpc.py +2 -0
- crca_sd/crca_sd_realtime.py +2 -0
- crca_sd/crca_sd_tui.py +2 -0
- cuda-keyring_1.1-1_all.deb +0 -0
- cuda-keyring_1.1-1_all.deb.1 +0 -0
- docs/IMAGE_ANNOTATION_USAGE.md +539 -0
- docs/INSTALL_DEEPSPEED.md +125 -0
- docs/api/branches/crca-cg.md +19 -0
- docs/api/branches/crca-q.md +27 -0
- docs/api/branches/crca-sd.md +37 -0
- docs/api/branches/general-agent.md +24 -0
- docs/api/branches/overview.md +19 -0
- docs/api/crca/agent-methods.md +62 -0
- docs/api/crca/operations.md +79 -0
- docs/api/crca/overview.md +32 -0
- docs/api/image-annotation/engine.md +52 -0
- docs/api/image-annotation/overview.md +17 -0
- docs/api/schemas/annotation.md +34 -0
- docs/api/schemas/core-schemas.md +82 -0
- docs/api/schemas/overview.md +32 -0
- docs/api/schemas/policy.md +30 -0
- docs/api/utils/conversation.md +22 -0
- docs/api/utils/graph-reasoner.md +32 -0
- docs/api/utils/overview.md +21 -0
- docs/api/utils/router.md +19 -0
- docs/api/utils/utilities.md +97 -0
- docs/architecture/causal-graphs.md +41 -0
- docs/architecture/data-flow.md +29 -0
- docs/architecture/design-principles.md +33 -0
- docs/architecture/hybrid-agent/components.md +38 -0
- docs/architecture/hybrid-agent/consistency.md +26 -0
- docs/architecture/hybrid-agent/overview.md +44 -0
- docs/architecture/hybrid-agent/reasoning.md +22 -0
- docs/architecture/llm-integration.md +26 -0
- docs/architecture/modular-structure.md +37 -0
- docs/architecture/overview.md +69 -0
- docs/architecture/policy-engine-arch.md +29 -0
- docs/branches/crca-cg/corposwarm.md +39 -0
- docs/branches/crca-cg/esg-scoring.md +30 -0
- docs/branches/crca-cg/multi-agent.md +35 -0
- docs/branches/crca-cg/overview.md +40 -0
- docs/branches/crca-q/alternative-data.md +55 -0
- docs/branches/crca-q/architecture.md +71 -0
- docs/branches/crca-q/backtesting.md +45 -0
- docs/branches/crca-q/causal-engine.md +33 -0
- docs/branches/crca-q/execution.md +39 -0
- docs/branches/crca-q/market-data.md +60 -0
- docs/branches/crca-q/overview.md +58 -0
- docs/branches/crca-q/philosophy.md +60 -0
- docs/branches/crca-q/portfolio-optimization.md +66 -0
- docs/branches/crca-q/risk-management.md +102 -0
- docs/branches/crca-q/setup.md +65 -0
- docs/branches/crca-q/signal-generation.md +61 -0
- docs/branches/crca-q/signal-validation.md +43 -0
- docs/branches/crca-sd/core.md +84 -0
- docs/branches/crca-sd/governance.md +53 -0
- docs/branches/crca-sd/mpc-solver.md +65 -0
- docs/branches/crca-sd/overview.md +59 -0
- docs/branches/crca-sd/realtime.md +28 -0
- docs/branches/crca-sd/tui.md +20 -0
- docs/branches/general-agent/overview.md +37 -0
- docs/branches/general-agent/personality.md +36 -0
- docs/branches/general-agent/prompt-builder.md +30 -0
- docs/changelog/index.md +79 -0
- docs/contributing/code-style.md +69 -0
- docs/contributing/documentation.md +43 -0
- docs/contributing/overview.md +29 -0
- docs/contributing/testing.md +29 -0
- docs/core/crcagent/async-operations.md +65 -0
- docs/core/crcagent/automatic-extraction.md +107 -0
- docs/core/crcagent/batch-prediction.md +80 -0
- docs/core/crcagent/bayesian-inference.md +60 -0
- docs/core/crcagent/causal-graph.md +92 -0
- docs/core/crcagent/counterfactuals.md +96 -0
- docs/core/crcagent/deterministic-simulation.md +78 -0
- docs/core/crcagent/dual-mode-operation.md +82 -0
- docs/core/crcagent/initialization.md +88 -0
- docs/core/crcagent/optimization.md +65 -0
- docs/core/crcagent/overview.md +63 -0
- docs/core/crcagent/time-series.md +57 -0
- docs/core/schemas/annotation.md +30 -0
- docs/core/schemas/core-schemas.md +82 -0
- docs/core/schemas/overview.md +30 -0
- docs/core/schemas/policy.md +41 -0
- docs/core/templates/base-agent.md +31 -0
- docs/core/templates/feature-mixins.md +31 -0
- docs/core/templates/overview.md +29 -0
- docs/core/templates/templates-guide.md +75 -0
- docs/core/tools/mcp-client.md +34 -0
- docs/core/tools/overview.md +24 -0
- docs/core/utils/conversation.md +27 -0
- docs/core/utils/graph-reasoner.md +29 -0
- docs/core/utils/overview.md +27 -0
- docs/core/utils/router.md +27 -0
- docs/core/utils/utilities.md +97 -0
- docs/css/custom.css +84 -0
- docs/examples/basic-usage.md +57 -0
- docs/examples/general-agent/general-agent-examples.md +50 -0
- docs/examples/hybrid-agent/hybrid-agent-examples.md +56 -0
- docs/examples/image-annotation/image-annotation-examples.md +54 -0
- docs/examples/integration/integration-examples.md +58 -0
- docs/examples/overview.md +37 -0
- docs/examples/trading/trading-examples.md +46 -0
- docs/features/causal-reasoning/advanced-topics.md +101 -0
- docs/features/causal-reasoning/counterfactuals.md +43 -0
- docs/features/causal-reasoning/do-calculus.md +50 -0
- docs/features/causal-reasoning/overview.md +47 -0
- docs/features/causal-reasoning/structural-models.md +52 -0
- docs/features/hybrid-agent/advanced-components.md +55 -0
- docs/features/hybrid-agent/core-components.md +64 -0
- docs/features/hybrid-agent/overview.md +34 -0
- docs/features/image-annotation/engine.md +82 -0
- docs/features/image-annotation/features.md +113 -0
- docs/features/image-annotation/integration.md +75 -0
- docs/features/image-annotation/overview.md +53 -0
- docs/features/image-annotation/quickstart.md +73 -0
- docs/features/policy-engine/doctrine-ledger.md +105 -0
- docs/features/policy-engine/monitoring.md +44 -0
- docs/features/policy-engine/mpc-control.md +89 -0
- docs/features/policy-engine/overview.md +46 -0
- docs/getting-started/configuration.md +225 -0
- docs/getting-started/first-agent.md +164 -0
- docs/getting-started/installation.md +144 -0
- docs/getting-started/quickstart.md +137 -0
- docs/index.md +118 -0
- docs/js/mathjax.js +13 -0
- docs/lrm/discovery_proof_notes.md +25 -0
- docs/lrm/finetune_full.md +83 -0
- docs/lrm/math_appendix.md +120 -0
- docs/lrm/overview.md +32 -0
- docs/mkdocs.yml +238 -0
- docs/stylesheets/extra.css +21 -0
- docs_generated/crca_core/CounterfactualResult.md +12 -0
- docs_generated/crca_core/DiscoveryHypothesisResult.md +13 -0
- docs_generated/crca_core/DraftSpec.md +13 -0
- docs_generated/crca_core/EstimateResult.md +13 -0
- docs_generated/crca_core/IdentificationResult.md +17 -0
- docs_generated/crca_core/InterventionDesignResult.md +12 -0
- docs_generated/crca_core/LockedSpec.md +15 -0
- docs_generated/crca_core/RefusalResult.md +12 -0
- docs_generated/crca_core/ValidationReport.md +9 -0
- docs_generated/crca_core/index.md +13 -0
- examples/general_agent_example.py +277 -0
- examples/general_agent_quickstart.py +202 -0
- examples/general_agent_simple.py +92 -0
- examples/hybrid_agent_auto_extraction.py +84 -0
- examples/hybrid_agent_dictionary_demo.py +104 -0
- examples/hybrid_agent_enhanced.py +179 -0
- examples/hybrid_agent_general_knowledge.py +107 -0
- examples/image_annotation_quickstart.py +328 -0
- examples/test_hybrid_fixes.py +77 -0
- image_annotation/__init__.py +27 -0
- image_annotation/annotation_engine.py +2593 -0
- install_cuda_wsl2.sh +59 -0
- install_deepspeed.sh +56 -0
- install_deepspeed_simple.sh +87 -0
- mkdocs.yml +252 -0
- ollama/Modelfile +8 -0
- prompts/__init__.py +2 -1
- prompts/default_crca.py +9 -1
- prompts/general_agent.py +227 -0
- prompts/image_annotation.py +56 -0
- pyproject.toml +17 -2
- requirements-docs.txt +10 -0
- requirements.txt +21 -2
- schemas/__init__.py +26 -1
- schemas/annotation.py +222 -0
- schemas/conversation.py +193 -0
- schemas/hybrid.py +211 -0
- schemas/reasoning.py +276 -0
- schemas_export/crca_core/CounterfactualResult.schema.json +108 -0
- schemas_export/crca_core/DiscoveryHypothesisResult.schema.json +113 -0
- schemas_export/crca_core/DraftSpec.schema.json +635 -0
- schemas_export/crca_core/EstimateResult.schema.json +113 -0
- schemas_export/crca_core/IdentificationResult.schema.json +145 -0
- schemas_export/crca_core/InterventionDesignResult.schema.json +111 -0
- schemas_export/crca_core/LockedSpec.schema.json +646 -0
- schemas_export/crca_core/RefusalResult.schema.json +90 -0
- schemas_export/crca_core/ValidationReport.schema.json +62 -0
- scripts/build_lrm_dataset.py +80 -0
- scripts/export_crca_core_schemas.py +54 -0
- scripts/export_hf_lrm.py +37 -0
- scripts/export_ollama_gguf.py +45 -0
- scripts/generate_changelog.py +157 -0
- scripts/generate_crca_core_docs_from_schemas.py +86 -0
- scripts/run_crca_core_benchmarks.py +163 -0
- scripts/run_full_finetune.py +198 -0
- scripts/run_lrm_eval.py +31 -0
- templates/graph_management.py +29 -0
- tests/conftest.py +9 -0
- tests/test_core.py +2 -3
- tests/test_crca_core_discovery_tabular.py +15 -0
- tests/test_crca_core_estimate_dowhy.py +36 -0
- tests/test_crca_core_identify.py +18 -0
- tests/test_crca_core_intervention_design.py +36 -0
- tests/test_crca_core_linear_gaussian_scm.py +69 -0
- tests/test_crca_core_spec.py +25 -0
- tests/test_crca_core_timeseries_pcmci.py +15 -0
- tests/test_crca_llm_coauthor.py +12 -0
- tests/test_crca_llm_orchestrator.py +80 -0
- tests/test_hybrid_agent_llm_enhanced.py +556 -0
- tests/test_image_annotation_demo.py +376 -0
- tests/test_image_annotation_operational.py +408 -0
- tests/test_image_annotation_unit.py +551 -0
- tests/test_training_moe.py +13 -0
- training/__init__.py +42 -0
- training/datasets.py +140 -0
- training/deepspeed_zero2_0_5b.json +22 -0
- training/deepspeed_zero2_1_5b.json +22 -0
- training/deepspeed_zero3_0_5b.json +28 -0
- training/deepspeed_zero3_14b.json +28 -0
- training/deepspeed_zero3_h100_3gpu.json +20 -0
- training/deepspeed_zero3_offload.json +28 -0
- training/eval.py +92 -0
- training/finetune.py +516 -0
- training/public_datasets.py +89 -0
- training_data/react_train.jsonl +7473 -0
- utils/agent_discovery.py +311 -0
- utils/batch_processor.py +317 -0
- utils/conversation.py +78 -0
- utils/edit_distance.py +118 -0
- utils/formatter.py +33 -0
- utils/graph_reasoner.py +530 -0
- utils/rate_limiter.py +283 -0
- utils/router.py +2 -2
- utils/tool_discovery.py +307 -0
- webui/__init__.py +10 -0
- webui/app.py +229 -0
- webui/config.py +104 -0
- webui/static/css/style.css +332 -0
- webui/static/js/main.js +284 -0
- webui/templates/index.html +42 -0
- tests/test_crca_excel.py +0 -166
- tests/test_data_broker.py +0 -424
- tests/test_palantir.py +0 -349
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/WHEEL +0 -0
- {crca-1.4.0.dist-info → crca-1.5.0.dist-info}/licenses/LICENSE +0 -0
utils/agent_discovery.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent discovery utilities.
|
|
3
|
+
|
|
4
|
+
Provides functionality for:
|
|
5
|
+
- Auto-discovery of AOP instances
|
|
6
|
+
- Auto-discovery of router instances
|
|
7
|
+
- Agent listing and metadata
|
|
8
|
+
- Route-first routing helpers
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import inspect
|
|
12
|
+
import sys
|
|
13
|
+
from typing import Any, Dict, List, Optional, Union
|
|
14
|
+
from loguru import logger
|
|
15
|
+
|
|
16
|
+
# Try to import AOP and Router
|
|
17
|
+
try:
|
|
18
|
+
from utils.aop import AOP
|
|
19
|
+
AOP_AVAILABLE = True
|
|
20
|
+
except ImportError:
|
|
21
|
+
AOP = None
|
|
22
|
+
AOP_AVAILABLE = False
|
|
23
|
+
logger.debug("AOP not available for agent discovery")
|
|
24
|
+
|
|
25
|
+
try:
|
|
26
|
+
from utils.router import SwarmRouter
|
|
27
|
+
ROUTER_AVAILABLE = True
|
|
28
|
+
except ImportError:
|
|
29
|
+
SwarmRouter = None
|
|
30
|
+
ROUTER_AVAILABLE = False
|
|
31
|
+
logger.debug("Router not available for agent discovery")
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def discover_aop_instances() -> List[Any]:
    """Auto-discover AOP instances in the current runtime.

    Searches for AOP instances in:
    - Global namespaces of the calling stack frames
    - Live objects tracked by the garbage collector (best effort)

    Returns:
        List of unique discovered AOP instances (empty if AOP is unavailable)
    """
    if not AOP_AVAILABLE:
        return []

    instances = []

    # Search the globals of every frame on the call stack. Frames defined in
    # the same module share a single globals dict, so deduplicate while
    # appending — the previous version added the same instance once per frame.
    try:
        frame = sys._getframe(1)
        while frame:
            for obj in frame.f_globals.values():
                if isinstance(obj, AOP) and obj not in instances:
                    instances.append(obj)
            frame = frame.f_back
    except Exception as e:
        logger.debug(f"Error discovering AOP instances from frames: {e}")

    # Best-effort sweep of all GC-tracked objects to catch instances that are
    # not reachable through any frame's globals.
    try:
        import gc
        for obj in gc.get_objects():
            if isinstance(obj, AOP) and obj not in instances:
                instances.append(obj)
    except Exception as e:
        logger.debug(f"Error discovering AOP instances from GC: {e}")

    logger.debug(f"Discovered {len(instances)} AOP instance(s)")
    return instances
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def discover_router_instances() -> List[Any]:
    """Auto-discover router instances in the current runtime.

    Searches for SwarmRouter instances in:
    - Global namespaces of the calling stack frames
    - Live objects tracked by the garbage collector (best effort)

    Returns:
        List of unique discovered router instances (empty if the router
        module is unavailable)
    """
    if not ROUTER_AVAILABLE:
        return []

    instances = []

    # Search the globals of every frame on the call stack. Frames defined in
    # the same module share a single globals dict, so deduplicate while
    # appending — the previous version added the same instance once per frame.
    try:
        frame = sys._getframe(1)
        while frame:
            for obj in frame.f_globals.values():
                if isinstance(obj, SwarmRouter) and obj not in instances:
                    instances.append(obj)
            frame = frame.f_back
    except Exception as e:
        logger.debug(f"Error discovering router instances from frames: {e}")

    # Best-effort sweep of all GC-tracked objects to catch instances that are
    # not reachable through any frame's globals.
    try:
        import gc
        for obj in gc.get_objects():
            if isinstance(obj, SwarmRouter) and obj not in instances:
                instances.append(obj)
    except Exception as e:
        logger.debug(f"Error discovering router instances from GC: {e}")

    logger.debug(f"Discovered {len(instances)} router instance(s)")
    return instances
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def get_agents_from_aop(aop_instance: Any) -> Dict[str, Any]:
    """Collect agent metadata from a single AOP instance.

    Args:
        aop_instance: AOP instance to query

    Returns:
        Dictionary mapping agent names to agent metadata
    """
    if not AOP_AVAILABLE or not isinstance(aop_instance, AOP):
        return {}

    discovered: Dict[str, Any] = {}

    try:
        # Registered agents are listed first and take precedence over
        # tool configs that happen to share a name.
        if hasattr(aop_instance, 'agents'):
            for agent_name, agent in aop_instance.agents.items():
                discovered[agent_name] = {
                    "name": agent_name,
                    "type": type(agent).__name__,
                    "description": getattr(agent, 'agent_description', 'No description'),
                    "available": True,
                }

        if hasattr(aop_instance, 'tool_configs'):
            for tool_name, config in aop_instance.tool_configs.items():
                if tool_name in discovered:
                    continue
                discovered[tool_name] = {
                    "name": tool_name,
                    "type": "tool",
                    "description": getattr(config, 'tool_description', 'No description'),
                    "available": True,
                }
    except Exception as e:
        logger.error(f"Error getting agents from AOP instance: {e}")

    return discovered
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def get_agents_from_router(router_instance: Any) -> Dict[str, Any]:
    """Collect agent metadata from a single router instance.

    Args:
        router_instance: Router instance to query

    Returns:
        Dictionary mapping agent names to agent metadata
    """
    if not ROUTER_AVAILABLE or not isinstance(router_instance, SwarmRouter):
        return {}

    discovered: Dict[str, Any] = {}

    try:
        if hasattr(router_instance, 'agents'):
            # Routers hold a list of agents; fall back to a positional name
            # when an agent does not declare one.
            for idx, agent in enumerate(router_instance.agents):
                name = getattr(agent, 'agent_name', f"agent_{idx}")
                discovered[name] = {
                    "name": name,
                    "type": type(agent).__name__,
                    "description": getattr(agent, 'agent_description', 'No description'),
                    "available": True,
                }
    except Exception as e:
        logger.error(f"Error getting agents from router instance: {e}")

    return discovered
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def discover_all_agents(
    aop_instances: Optional[List[Any]] = None,
    router_instances: Optional[List[Any]] = None,
) -> Dict[str, Any]:
    """Discover all available agents from AOP and router instances.

    Args:
        aop_instances: Optional list of AOP instances (auto-discovered if None)
        router_instances: Optional list of router instances (auto-discovered if None)

    Returns:
        Dictionary mapping agent names to agent metadata
    """
    # Auto-discover whichever instance lists were not supplied.
    if aop_instances is None:
        aop_instances = discover_aop_instances()
    if router_instances is None:
        router_instances = discover_router_instances()

    combined: Dict[str, Any] = {}

    # Merge AOP-hosted agents first, then router-hosted agents; later
    # sources overwrite earlier entries with the same name.
    for source in aop_instances:
        combined.update(get_agents_from_aop(source))
    for source in router_instances:
        combined.update(get_agents_from_router(source))

    logger.info(f"Discovered {len(combined)} total agent(s)")
    return combined
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def find_best_agent_for_task(
    task: str,
    available_agents: Dict[str, Any],
    aop_instances: Optional[List[Any]] = None,
    router_instances: Optional[List[Any]] = None,
) -> Optional[tuple[str, Any, str]]:
    """Find the best agent for a given task (route-first strategy).

    Args:
        task: Task description
        available_agents: Dictionary of available agents
        aop_instances: Optional list of AOP instances
        router_instances: Optional list of router instances

    Returns:
        Tuple of (agent_name, agent_instance, source) or None if no suitable agent found
        source is either 'aop' or 'router'
    """
    if not available_agents:
        return None

    # Naive substring keyword matching; an LLM-based router could replace
    # this scoring later.
    keywords = task.lower().split()

    for agent_name, agent_info in available_agents.items():
        description = agent_info.get("description", "").lower()
        agent_type = agent_info.get("type", "").lower()

        if not any(kw in description or kw in agent_type for kw in keywords):
            continue

        # Resolve the concrete agent object, preferring AOP registries
        # (route-first strategy).
        for aop in (aop_instances or []):
            if hasattr(aop, 'agents') and agent_name in aop.agents:
                return (agent_name, aop.agents[agent_name], "aop")

        for router in (router_instances or []):
            if hasattr(router, 'agents'):
                for candidate in router.agents:
                    if getattr(candidate, 'agent_name', None) == agent_name:
                        return (agent_name, candidate, "router")

    # No resolvable match: caller falls back to direct handling.
    return None
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def route_to_agent(
    agent_name: str,
    task: str,
    aop_instances: Optional[List[Any]] = None,
    router_instances: Optional[List[Any]] = None,
) -> Optional[Any]:
    """Route a task to a specific agent.

    Args:
        agent_name: Name of the agent to route to
        task: Task to execute
        aop_instances: Optional list of AOP instances
        router_instances: Optional list of router instances

    Returns:
        Agent response or None if agent not found
    """
    # Auto-discover whichever instance lists were not supplied.
    if aop_instances is None:
        aop_instances = discover_aop_instances()
    if router_instances is None:
        router_instances = discover_router_instances()

    # AOP registries are checked before routers.
    for aop in aop_instances:
        if not hasattr(aop, 'agents') or agent_name not in aop.agents:
            continue
        candidate = aop.agents[agent_name]
        if hasattr(candidate, 'run'):
            return candidate.run(task)

    for router in router_instances:
        if not hasattr(router, 'agents'):
            continue
        for candidate in router.agents:
            if getattr(candidate, 'agent_name', None) != agent_name:
                continue
            if hasattr(candidate, 'run'):
                return candidate.run(task)

    logger.warning(f"Agent '{agent_name}' not found for routing")
    return None
|
utils/batch_processor.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Batch processing utilities.
|
|
3
|
+
|
|
4
|
+
Provides functionality for:
|
|
5
|
+
- Parallel task execution
|
|
6
|
+
- Batch rate limiting
|
|
7
|
+
- Progress tracking
|
|
8
|
+
- Error aggregation
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import asyncio
|
|
12
|
+
import concurrent.futures
|
|
13
|
+
import time
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
|
16
|
+
from loguru import logger
|
|
17
|
+
|
|
18
|
+
try:
|
|
19
|
+
from utils.rate_limiter import RateLimiter, RateLimitConfig
|
|
20
|
+
RATE_LIMITER_AVAILABLE = True
|
|
21
|
+
except ImportError:
|
|
22
|
+
RATE_LIMITER_AVAILABLE = False
|
|
23
|
+
logger.debug("Rate limiter not available for batch processing")
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class BatchResult:
    """Outcome of a single task executed as part of a batch.

    Attributes:
        task_id: Task identifier
        success: Whether task succeeded
        result: Task result (if successful)
        error: Error message (if failed)
        execution_time: Time taken to execute task
    """

    task_id: str
    success: bool
    # Populated only on success; stays None on failure.
    result: Any = None
    # Populated only on failure; stays None on success.
    error: Optional[str] = None
    # Wall-clock seconds spent executing this task.
    execution_time: float = 0.0
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass
class BatchStats:
    """Aggregate statistics for one batch-processing run.

    Attributes:
        total_tasks: Total number of tasks
        completed_tasks: Number of completed tasks
        failed_tasks: Number of failed tasks
        total_time: Total execution time
        average_time: Average execution time per task
    """

    total_tasks: int = 0
    completed_tasks: int = 0
    failed_tasks: int = 0
    # Wall-clock seconds for the whole batch.
    total_time: float = 0.0
    # total_time divided by number of processed tasks.
    average_time: float = 0.0
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class BatchProcessor:
|
|
63
|
+
"""Batch processor for parallel task execution.
|
|
64
|
+
|
|
65
|
+
Provides functionality for:
|
|
66
|
+
- Parallel task execution with configurable workers
|
|
67
|
+
- Batch rate limiting
|
|
68
|
+
- Progress tracking
|
|
69
|
+
- Error aggregation
|
|
70
|
+
"""
|
|
71
|
+
|
|
72
|
+
def __init__(
    self,
    max_workers: int = 4,
    rate_limiter: Optional[Any] = None,
    rate_limit_config: Optional[Any] = None,
):
    """Initialize batch processor.

    Args:
        max_workers: Maximum number of parallel workers
        rate_limiter: Optional rate limiter instance
        rate_limit_config: Optional rate limit configuration
    """
    self.max_workers = max_workers

    # Rate-limiter resolution order: explicit instance, then explicit
    # config, then a conservative default; None when the rate-limiter
    # module is unavailable.
    if rate_limiter is not None:
        self.rate_limiter = rate_limiter
    elif not RATE_LIMITER_AVAILABLE:
        self.rate_limiter = None
    elif rate_limit_config is not None:
        self.rate_limiter = RateLimiter(rate_limit_config)
    else:
        self.rate_limiter = RateLimiter(
            RateLimitConfig(
                requests_per_minute=100,
                requests_per_hour=5000,
            )
        )

    logger.debug(f"Initialized BatchProcessor with {max_workers} workers")
|
|
103
|
+
|
|
104
|
+
def process_batch(
    self,
    tasks: List[Any],
    task_fn: Callable,
    task_ids: Optional[List[str]] = None,
    user_id: str = "default",
    show_progress: bool = True,
) -> Tuple[List[BatchResult], BatchStats]:
    """Process a batch of tasks in parallel.

    Args:
        tasks: List of task inputs
        task_fn: Function to execute for each task
        task_ids: Optional list of task identifiers
        user_id: User identifier for rate limiting
        show_progress: Whether to show progress updates

    Returns:
        Tuple of (results, stats); results are appended in completion
        order, not input order.
    """
    if not tasks:
        return [], BatchStats()

    batch_start = time.time()

    # Fall back to generated IDs when none were supplied, or when the
    # supplied list does not line up with the task list.
    if task_ids is not None and len(task_ids) != len(tasks):
        logger.warning("Task IDs length doesn't match tasks length, generating new IDs")
        task_ids = None
    if task_ids is None:
        task_ids = [f"task_{i}" for i in range(len(tasks))]

    def run_one(payload: Any, tid: str) -> BatchResult:
        """Execute one task, converting any exception into a failed result."""
        started = time.time()
        try:
            if self.rate_limiter:
                allowed, _ = self.rate_limiter.check_rate_limit(user_id)
                if not allowed:
                    # Block until the limiter clears (bounded wait).
                    self.rate_limiter.wait_if_rate_limited(user_id, max_wait=60.0)

            outcome = task_fn(payload)
            return BatchResult(
                task_id=tid,
                success=True,
                result=outcome,
                execution_time=time.time() - started,
            )
        except Exception as exc:
            message = str(exc)
            logger.error(f"Error processing task {tid}: {message}")
            return BatchResult(
                task_id=tid,
                success=False,
                error=message,
                execution_time=time.time() - started,
            )

    results: List[BatchResult] = []

    # Fan the tasks out over a thread pool and collect them as they finish.
    with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as pool:
        pending = [
            pool.submit(run_one, payload, tid)
            for payload, tid in zip(tasks, task_ids)
        ]
        for done_count, future in enumerate(concurrent.futures.as_completed(pending), start=1):
            results.append(future.result())
            if show_progress:
                logger.info(f"Batch progress: {done_count}/{len(tasks)} tasks completed")

    elapsed = time.time() - batch_start
    succeeded = sum(1 for r in results if r.success)
    failed = len(results) - succeeded

    stats = BatchStats(
        total_tasks=len(tasks),
        completed_tasks=succeeded,
        failed_tasks=failed,
        total_time=elapsed,
        average_time=elapsed / len(results) if results else 0.0,
    )

    logger.info(
        f"Batch processing complete: {succeeded}/{len(tasks)} succeeded, "
        f"{failed} failed, {elapsed:.2f}s total"
    )

    return results, stats
|
|
210
|
+
|
|
211
|
+
async def process_batch_async(
    self,
    tasks: List[Any],
    task_fn: Callable,
    task_ids: Optional[List[str]] = None,
    user_id: str = "default",
    show_progress: bool = True,
) -> Tuple[List[BatchResult], BatchStats]:
    """Process a batch of tasks asynchronously.

    Args:
        tasks: List of task inputs
        task_fn: Function to execute for each task; coroutine functions are
            awaited directly, plain callables are run in the default executor
        task_ids: Optional list of task identifiers
        user_id: User identifier for rate limiting
        show_progress: Whether to show progress updates
            (NOTE(review): currently unused on the async path — confirm intent)

    Returns:
        Tuple of (results, stats)
    """
    if not tasks:
        return [], BatchStats()

    start_time = time.time()

    # Generate task IDs if not provided, or if the provided list is mismatched
    if task_ids is None:
        task_ids = [f"task_{i}" for i in range(len(tasks))]
    elif len(task_ids) != len(tasks):
        logger.warning("Task IDs length doesn't match tasks length, generating new IDs")
        task_ids = [f"task_{i}" for i in range(len(tasks))]

    async def process_single_task_async(task: Any, task_id: str) -> BatchResult:
        """Process a single task asynchronously with error handling."""
        task_start = time.time()

        try:
            # Apply rate limiting if available: back off briefly and retry
            # once; if still limited, wait longer but proceed anyway
            # (best-effort throttling, mirroring the synchronous path).
            if self.rate_limiter:
                is_allowed, error_msg = self.rate_limiter.check_rate_limit(user_id)
                if not is_allowed:
                    await asyncio.sleep(1.0)
                    is_allowed, error_msg = self.rate_limiter.check_rate_limit(user_id)
                    if not is_allowed:
                        await asyncio.sleep(5.0)

            # Execute task: await coroutine functions directly; run sync
            # callables in the default executor so they don't block the loop.
            if asyncio.iscoroutinefunction(task_fn):
                result = await task_fn(task)
            else:
                # BUGFIX: this coroutine only ever runs under a running event
                # loop, so the former `except RuntimeError` fallback that
                # created a brand-new loop via asyncio.new_event_loop() was
                # dead code — and awaiting a future bound to a never-running
                # loop would hang forever. Use the running loop directly.
                loop = asyncio.get_running_loop()
                result = await loop.run_in_executor(None, task_fn, task)

            execution_time = time.time() - task_start

            return BatchResult(
                task_id=task_id,
                success=True,
                result=result,
                execution_time=execution_time,
            )
        except Exception as e:
            execution_time = time.time() - task_start
            error_msg = str(e)
            logger.error(f"Error processing task {task_id}: {error_msg}")

            return BatchResult(
                task_id=task_id,
                success=False,
                error=error_msg,
                execution_time=execution_time,
            )

    # Process all tasks concurrently; gather preserves input order.
    tasks_to_run = [
        process_single_task_async(task, task_id)
        for task, task_id in zip(tasks, task_ids)
    ]
    results = await asyncio.gather(*tasks_to_run)

    # Calculate statistics
    total_time = time.time() - start_time
    completed_tasks = sum(1 for r in results if r.success)
    failed_tasks = len(results) - completed_tasks
    average_time = total_time / len(results) if results else 0.0

    stats = BatchStats(
        total_tasks=len(tasks),
        completed_tasks=completed_tasks,
        failed_tasks=failed_tasks,
        total_time=total_time,
        average_time=average_time,
    )

    logger.info(
        f"Async batch processing complete: {completed_tasks}/{len(tasks)} succeeded, "
        f"{failed_tasks} failed, {total_time:.2f}s total"
    )

    return results, stats
|
utils/conversation.py
CHANGED
|
@@ -1192,4 +1192,82 @@ class Conversation:
|
|
|
1192
1192
|
except Exception as e:
|
|
1193
1193
|
logger.error(f"Dynamic auto chunking failed: {e}")
|
|
1194
1194
|
return self._return_history_as_string_worker()
|
|
1195
|
+
|
|
1196
|
+
def get_conversation_stats(self) -> Dict[str, Any]:
    """Get statistics about the conversation.

    Returns:
        Dictionary with conversation statistics
    """
    history = self.conversation_history
    role_counts: Dict[str, int] = {}
    token_total = 0
    last_seen_timestamp = None

    # Single pass over the history: tally messages per role, sum the
    # optional per-message token counts, and remember the timestamp of
    # the most recent message that carries one.
    for entry in history:
        role = entry.get("role", "unknown")
        role_counts[role] = role_counts.get(role, 0) + 1

        if "token_count" in entry:
            token_total += entry["token_count"]

        if "timestamp" in entry:
            last_seen_timestamp = entry["timestamp"]

    return {
        "total_messages": len(history),
        "total_tokens": token_total,
        "messages_by_role": role_counts,
        "created_at": getattr(self, "created_at", None),
        "last_updated": last_seen_timestamp,
    }
|
|
1224
|
+
|
|
1225
|
+
def get_enhanced_metadata(self) -> Dict[str, Any]:
    """Get enhanced metadata about the conversation.

    Returns:
        Dictionary with enhanced metadata
    """
    # Identity and configuration fields, plus a nested statistics snapshot
    # computed on the fly from the current history.
    return {
        "id": self.id,
        "name": self.name,
        "created_at": getattr(self, "created_at", None),
        "context_length": self.context_length,
        "export_method": self.export_method,
        "save_filepath": self.save_filepath,
        "stats": self.get_conversation_stats(),
    }
|
|
1242
|
+
|
|
1243
|
+
def save_with_metadata(self, filepath: Optional[str] = None, force: bool = True) -> None:
    """Save conversation with enhanced metadata.

    Args:
        filepath: Optional filepath to save to (uses default if None)
        force: If True, saves regardless of autosave setting
    """
    # NOTE(review): `force` is not consulted anywhere in this body — confirm
    # whether it was meant to gate on an autosave flag.
    if filepath:
        self.save_filepath = filepath

    # Bundle the enhanced metadata alongside the raw history.
    payload = {
        "metadata": self.get_enhanced_metadata(),
        "history": self.conversation_history,
    }

    try:
        self._ensure_save_path()

        with open(self.save_filepath, "w", encoding="utf-8") as f:
            if self.export_method == "json":
                json.dump(payload, f, indent=4, default=str)
            else:
                yaml.dump(payload, f, indent=4, default_flow_style=False, sort_keys=False)

        logger.info(f"Conversation with metadata saved to {self.save_filepath}")
    except Exception as e:
        logger.error(f"Failed to save conversation with metadata: {str(e)}\n{traceback.format_exc()}")
        raise
|
|
1195
1273
|
|