flock-core 0.3.15__py3-none-any.whl → 0.3.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of flock-core might be problematic.
- flock/cli/assets/release_notes.md +1 -0
- flock/core/context/context.py +21 -23
- flock/core/flock_agent.py +20 -5
- flock/core/flock_factory.py +2 -0
- flock/core/flock_module.py +27 -5
- flock/core/tools/azure_tools.py +496 -0
- flock/core/tools/basic_tools.py +96 -8
- flock/modules/callback/callback_module.py +21 -4
- flock/modules/memory/memory_module.py +19 -4
- flock/modules/output/output_module.py +12 -1
- flock/modules/performance/metrics_module.py +24 -5
- flock/modules/zep/zep_module.py +10 -2
- flock/workflow/activities.py +2 -2
- {flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/METADATA +3 -1
- {flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/RECORD +18 -17
- {flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/WHEEL +0 -0
- {flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/entry_points.txt +0 -0
- {flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/licenses/LICENSE +0 -0
flock/cli/assets/release_notes.md
CHANGED
@@ -69,6 +69,7 @@ Like a hummingbird, modules are small and nimble code packages. Put enough of th
 - **Output Module** – Advanced output formatting and storage
 - **Metrics Module** – Detailed performance tracking
 - **Zep Module** – Uses Zep for Knowledge Graphs
+- **Azure Search Tools** – Integration with Azure AI Search for vector search and document retrieval

 ---

flock/core/context/context.py
CHANGED
@@ -1,9 +1,10 @@
 import uuid
-from dataclasses import asdict
+from dataclasses import asdict
 from datetime import datetime
 from typing import Any, Literal

 from opentelemetry import trace
+from pydantic import BaseModel, Field

 from flock.core.context.context_vars import FLOCK_LAST_AGENT, FLOCK_LAST_RESULT
 from flock.core.logging.logging import get_logger
@@ -13,34 +14,31 @@ logger = get_logger("context")
 tracer = trace.get_tracer(__name__)


-
-
-
-
-
-
-
-    called_from: str = field(default="")
+class AgentRunRecord(BaseModel):
+    id: str = Field(default="")
+    agent: str = Field(default="")
+    data: dict[str, Any] = Field(default_factory=dict)
+    timestamp: str = Field(default="")
+    hand_off: dict | None = Field(default_factory=dict)
+    called_from: str = Field(default="")


-
-
-
-
-
-    serializer: Literal["json", "cloudpickle", "msgpack"] = field(
+class AgentDefinition(BaseModel):
+    agent_type: str = Field(default="")
+    agent_name: str = Field(default="")
+    agent_data: dict = Field(default_factory=dict)
+    serializer: Literal["json", "cloudpickle", "msgpack"] = Field(
         default="cloudpickle"
     )


-
-
-
-
-
-
-
-    workflow_timestamp: str = field(default="")
+class FlockContext(Serializable, BaseModel):
+    state: dict[str, Any] = Field(default_factory=dict)
+    history: list[AgentRunRecord] = Field(default_factory=list)
+    agent_definitions: dict[str, AgentDefinition] = Field(default_factory=dict)
+    run_id: str = Field(default="")
+    workflow_id: str = Field(default="")
+    workflow_timestamp: str = Field(default="")

     def record(
         self,
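The context records above are now plain Pydantic models instead of dataclasses, so they gain `Field` defaults and Pydantic v2 serialization. A minimal usage sketch; the run values below are made up, only the class and field names come from the diff:

```python
from datetime import datetime

from flock.core.context.context import AgentRunRecord, FlockContext

# Build a context and append one (made-up) run record to its history.
ctx = FlockContext(run_id="run-001", workflow_id="wf-001")
ctx.history.append(
    AgentRunRecord(
        id="rec-001",
        agent="my_agent",
        data={"summary": "hello world"},
        timestamp=datetime.now().isoformat(),
        called_from="previous_agent",
    )
)

# Because everything is a BaseModel now, dumping the whole context is one call.
print(ctx.model_dump())
```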
flock/core/flock_agent.py
CHANGED
@@ -11,6 +11,7 @@ import cloudpickle
 from opentelemetry import trace
 from pydantic import BaseModel, Field

+from flock.core.context.context import FlockContext
 from flock.core.flock_evaluator import FlockEvaluator
 from flock.core.flock_module import FlockModule
 from flock.core.flock_router import FlockRouter
@@ -73,6 +74,11 @@ class FlockAgent(BaseModel, ABC, DSPyIntegrationMixin):
         description="FlockModules attached to this agent",
     )

+    context: FlockContext | None = Field(
+        default=None,
+        description="Context associated with flock",
+    )
+
     def add_module(self, module: FlockModule) -> None:
         """Add a module to this agent."""
         self.modules[module.name] = module
@@ -95,6 +101,13 @@ class FlockAgent(BaseModel, ABC, DSPyIntegrationMixin):
         with tracer.start_as_current_span("agent.initialize") as span:
             span.set_attribute("agent.name", self.name)
             span.set_attribute("inputs", str(inputs))
+            if not self.context:
+                self.context = FlockContext()
+
+            if self.name not in self.context.agent_definitions:
+                self.context.add_agent_definition(
+                    type(self), self.name, self.to_dict()
+                )

             try:
                 for module in self.get_enabled_modules():
@@ -102,7 +115,7 @@ class FlockAgent(BaseModel, ABC, DSPyIntegrationMixin):
                         f"agent.initialize - module {module.name}",
                         agent=self.name,
                     )
-                    await module.initialize(self, inputs)
+                    await module.initialize(self, inputs, self.context)
             except Exception as module_error:
                 logger.error(
                     "Error during initialize",
@@ -124,7 +137,7 @@ class FlockAgent(BaseModel, ABC, DSPyIntegrationMixin):
             )
             try:
                 for module in self.get_enabled_modules():
-                    await module.terminate(self, inputs, inputs)
+                    await module.terminate(self, inputs, inputs, self.context)
             except Exception as module_error:
                 logger.error(
                     "Error during terminate",
@@ -139,7 +152,7 @@ class FlockAgent(BaseModel, ABC, DSPyIntegrationMixin):
             span.set_attribute("inputs", str(inputs))
             try:
                 for module in self.get_enabled_modules():
-                    await module.on_error(self, error, inputs)
+                    await module.on_error(self, error, inputs, self.context)
             except Exception as module_error:
                 logger.error(
                     "Error during on_error",
@@ -154,13 +167,15 @@ class FlockAgent(BaseModel, ABC, DSPyIntegrationMixin):
             span.set_attribute("inputs", str(inputs))

             for module in self.get_enabled_modules():
                inputs = await module.pre_evaluate(self, inputs)
+                inputs = await module.pre_evaluate(self, inputs, self.context)

             try:
                 result = await self.evaluator.evaluate(self, inputs, self.tools)

                 for module in self.get_enabled_modules():
-                    result = await module.post_evaluate(
+                    result = await module.post_evaluate(
+                        self, inputs, result, self.context
+                    )

                 span.set_attribute("result", str(result))

flock/core/flock_factory.py
CHANGED
@@ -35,6 +35,7 @@ class FlockFactory:
         max_tokens: int = 4096,
         alert_latency_threshold_ms: int = 30000,
         no_output: bool = False,
+        print_context: bool = False,
     ) -> FlockAgent:
         """Creates a default FlockAgent.

@@ -65,6 +66,7 @@ class FlockFactory:
             theme=output_theme,
             wait_for_input=wait_for_input,
             no_output=no_output,
+            print_context=print_context,
         )
         output_module = OutputModule("output", config=output_config)

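A short usage sketch for the new `print_context` flag. The factory method name and the agent fields below are assumed for illustration; only `print_context` itself is taken from the diff:

```python
from flock.core.flock_factory import FlockFactory

# Hypothetical agent definition; print_context=True asks the OutputModule to
# attach the shared FlockContext to the displayed result.
agent = FlockFactory.create_default_agent(
    name="summarizer",
    input="text",
    output="summary",
    print_context=True,
)
```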
flock/core/flock_module.py
CHANGED
@@ -5,6 +5,8 @@ from typing import Any, TypeVar

 from pydantic import BaseModel, Field, create_model

+from flock.core.context.context import FlockContext
+
 T = TypeVar("T", bound="FlockModuleConfig")


@@ -50,30 +52,50 @@ class FlockModule(BaseModel, ABC):
         default_factory=FlockModuleConfig, description="Module configuration"
     )

-    async def initialize(
+    async def initialize(
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
+    ) -> None:
         """Called when the agent starts running."""
         pass

     async def pre_evaluate(
-        self,
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Called before agent evaluation, can modify inputs."""
         return inputs

     async def post_evaluate(
-        self,
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Called after agent evaluation, can modify results."""
         return result

     async def terminate(
-        self,
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Called when the agent finishes running."""
         pass

     async def on_error(
-        self,
+        self,
+        agent: Any,
+        error: Exception,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Called when an error occurs during agent execution."""
         pass
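Every lifecycle hook now takes an optional `FlockContext`, so modules can read and write shared state without going through the agent. A minimal sketch of a custom module using the new parameter (the module itself is hypothetical, not part of the package):

```python
from typing import Any

from flock.core.context.context import FlockContext
from flock.core.flock_module import FlockModule


class RunCounterModule(FlockModule):
    """Hypothetical module that counts evaluations in the shared context state."""

    name: str = "run_counter"

    async def post_evaluate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        # The context argument is optional, so guard against callers that omit it.
        if context is not None:
            context.state["run_count"] = context.state.get("run_count", 0) + 1
        return result
```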
flock/core/tools/azure_tools.py
ADDED
@@ -0,0 +1,496 @@
+import os
+from typing import Any
+
+from azure.core.credentials import AzureKeyCredential
+from azure.search.documents import SearchClient
+from azure.search.documents.indexes import SearchIndexClient
+from azure.search.documents.indexes.models import (
+    ExhaustiveKnnAlgorithmConfiguration,
+    HnswAlgorithmConfiguration,
+    SearchableField,
+    SearchField,
+    SearchFieldDataType,
+    SearchIndex,
+    SimpleField,
+    VectorSearch,
+    VectorSearchProfile,
+)
+from azure.search.documents.models import VectorizedQuery
+
+from flock.core.logging.trace_and_logged import traced_and_logged
+
+
+def _get_default_endpoint() -> str:
+    """Get the default Azure Search endpoint from environment variables."""
+    endpoint = os.environ.get("AZURE_SEARCH_ENDPOINT")
+    if not endpoint:
+        raise ValueError(
+            "AZURE_SEARCH_ENDPOINT environment variable is not set"
+        )
+    return endpoint
+
+
+def _get_default_api_key() -> str:
+    """Get the default Azure Search API key from environment variables."""
+    api_key = os.environ.get("AZURE_SEARCH_API_KEY")
+    if not api_key:
+        raise ValueError("AZURE_SEARCH_API_KEY environment variable is not set")
+    return api_key
+
+
+def _get_default_index_name() -> str:
+    """Get the default Azure Search index name from environment variables."""
+    index_name = os.environ.get("AZURE_SEARCH_INDEX_NAME")
+    if not index_name:
+        raise ValueError(
+            "AZURE_SEARCH_INDEX_NAME environment variable is not set"
+        )
+    return index_name
+
+
+@traced_and_logged
+def azure_search_initialize_clients(
+    endpoint: str | None = None,
+    api_key: str | None = None,
+    index_name: str | None = None,
+) -> dict[str, Any]:
+    """Initialize Azure AI Search clients.
+
+    Args:
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+        index_name: Optional index name for SearchClient initialization (defaults to AZURE_SEARCH_INDEX_NAME env var if not None)
+
+    Returns:
+        Dictionary containing the initialized clients
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+
+    credential = AzureKeyCredential(api_key)
+
+    # Create the search index client
+    search_index_client = SearchIndexClient(
+        endpoint=endpoint, credential=credential
+    )
+
+    # Create clients dictionary
+    clients = {
+        "index_client": search_index_client,
+    }
+
+    # Add search client if index_name was provided or available in env
+    if index_name is None and os.environ.get("AZURE_SEARCH_INDEX_NAME"):
+        index_name = _get_default_index_name()
+
+    if index_name:
+        search_client = SearchClient(
+            endpoint=endpoint, index_name=index_name, credential=credential
+        )
+        clients["search_client"] = search_client
+
+    return clients
+
+
+@traced_and_logged
+def azure_search_create_index(
+    index_name: str | None = None,
+    fields: list[SearchField] = None,
+    vector_search: VectorSearch | None = None,
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> dict[str, Any]:
+    """Create a new search index in Azure AI Search.
+
+    Args:
+        index_name: Name of the search index to create (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        fields: List of field definitions for the index
+        vector_search: Optional vector search configuration
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        Dictionary containing information about the created index
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    if fields is None:
+        raise ValueError("Fields must be provided for index creation")
+
+    clients = azure_search_initialize_clients(endpoint, api_key)
+    index_client = clients["index_client"]
+
+    # Create the index
+    index = SearchIndex(
+        name=index_name, fields=fields, vector_search=vector_search
+    )
+
+    result = index_client.create_or_update_index(index)
+
+    return {
+        "index_name": result.name,
+        "fields": [field.name for field in result.fields],
+        "created": True,
+    }
+
+
+@traced_and_logged
+def azure_search_upload_documents(
+    documents: list[dict[str, Any]],
+    index_name: str | None = None,
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> dict[str, Any]:
+    """Upload documents to an Azure AI Search index.
+
+    Args:
+        documents: List of documents to upload (as dictionaries)
+        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        Dictionary containing the upload results
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    clients = azure_search_initialize_clients(endpoint, api_key, index_name)
+    search_client = clients["search_client"]
+
+    result = search_client.upload_documents(documents=documents)
+
+    # Process results
+    succeeded = sum(1 for r in result if r.succeeded)
+
+    return {
+        "succeeded": succeeded,
+        "failed": len(result) - succeeded,
+        "total": len(result),
+    }
+
+
+@traced_and_logged
+def azure_search_query(
+    search_text: str | None = None,
+    filter: str | None = None,
+    select: list[str] | None = None,
+    top: int | None = 50,
+    vector: list[float] | None = None,
+    vector_field: str | None = None,
+    vector_k: int | None = 10,
+    index_name: str | None = None,
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> list[dict[str, Any]]:
+    """Search documents in an Azure AI Search index.
+
+    Args:
+        search_text: Optional text to search for (keyword search)
+        filter: Optional OData filter expression
+        select: Optional list of fields to return
+        top: Maximum number of results to return
+        vector: Optional vector for vector search
+        vector_field: Name of the field containing vectors for vector search
+        vector_k: Number of nearest neighbors to retrieve in vector search
+        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        List of search results as dictionaries
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    clients = azure_search_initialize_clients(endpoint, api_key, index_name)
+    search_client = clients["search_client"]
+
+    # Set up vector query if vector is provided
+    vectorized_query = None
+    if vector and vector_field:
+        vectorized_query = VectorizedQuery(
+            vector=vector, k=vector_k, fields=[vector_field]
+        )
+
+    # Execute the search
+    results = search_client.search(
+        search_text=search_text,
+        filter=filter,
+        select=select,
+        top=top,
+        vector_queries=[vectorized_query] if vectorized_query else None,
+    )
+
+    # Convert results to list of dictionaries
+    result_list = [dict(result) for result in results]
+
+    return result_list
+
+
+@traced_and_logged
+def azure_search_get_document(
+    key: str,
+    select: list[str] | None = None,
+    index_name: str | None = None,
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> dict[str, Any]:
+    """Retrieve a specific document from an Azure AI Search index by key.
+
+    Args:
+        key: The unique key of the document to retrieve
+        select: Optional list of fields to return
+        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        The retrieved document as a dictionary
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    clients = azure_search_initialize_clients(endpoint, api_key, index_name)
+    search_client = clients["search_client"]
+
+    result = search_client.get_document(key=key, selected_fields=select)
+
+    return dict(result)
+
+
+@traced_and_logged
+def azure_search_delete_documents(
+    keys: list[str],
+    key_field_name: str = "id",
+    index_name: str | None = None,
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> dict[str, Any]:
+    """Delete documents from an Azure AI Search index.
+
+    Args:
+        keys: List of document keys to delete
+        key_field_name: Name of the key field (defaults to "id")
+        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        Dictionary containing the deletion results
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    clients = azure_search_initialize_clients(endpoint, api_key, index_name)
+    search_client = clients["search_client"]
+
+    # Format documents for deletion (only need the key field)
+    documents_to_delete = [{key_field_name: key} for key in keys]
+
+    result = search_client.delete_documents(documents=documents_to_delete)
+
+    # Process results
+    succeeded = sum(1 for r in result if r.succeeded)
+
+    return {
+        "succeeded": succeeded,
+        "failed": len(result) - succeeded,
+        "total": len(result),
+    }
+
+
+@traced_and_logged
+def azure_search_list_indexes(
+    endpoint: str | None = None, api_key: str | None = None
+) -> list[dict[str, Any]]:
+    """List all indexes in the Azure AI Search service.
+
+    Args:
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        List of indexes as dictionaries
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+
+    clients = azure_search_initialize_clients(endpoint, api_key)
+    index_client = clients["index_client"]
+
+    result = index_client.list_indexes()
+
+    # Convert index objects to dictionaries with basic information
+    indexes = [
+        {
+            "name": index.name,
+            "fields": [field.name for field in index.fields],
+            "field_count": len(index.fields),
+        }
+        for index in result
+    ]
+
+    return indexes
+
+
+@traced_and_logged
+def azure_search_get_index_statistics(
+    index_name: str | None = None,
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> dict[str, Any]:
+    """Get statistics for a specific Azure AI Search index.
+
+    Args:
+        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        Dictionary containing index statistics
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    clients = azure_search_initialize_clients(endpoint, api_key, index_name)
+    search_client = clients["search_client"]
+
+    stats = search_client.get_document_count()
+
+    return {"document_count": stats}
+
+
+@traced_and_logged
+def azure_search_create_vector_index(
+    fields: list[dict[str, Any]],
+    vector_dimensions: int,
+    index_name: str | None = None,
+    algorithm_kind: str = "hnsw",
+    endpoint: str | None = None,
+    api_key: str | None = None,
+) -> dict[str, Any]:
+    """Create a vector search index in Azure AI Search.
+
+    Args:
+        fields: List of field configurations (dicts with name, type, etc.)
+        vector_dimensions: Dimensions of the vector field
+        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
+        algorithm_kind: Vector search algorithm ("hnsw" or "exhaustive")
+        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
+        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
+
+    Returns:
+        Dictionary with index creation result
+    """
+    # Use environment variables as defaults if not provided
+    endpoint = endpoint or _get_default_endpoint()
+    api_key = api_key or _get_default_api_key()
+    index_name = index_name or _get_default_index_name()
+
+    clients = azure_search_initialize_clients(endpoint, api_key)
+    index_client = clients["index_client"]
+
+    # Convert field configurations to SearchField objects
+    index_fields = []
+    vector_fields = []
+
+    for field_config in fields:
+        field_name = field_config["name"]
+        field_type = field_config["type"]
+        field_searchable = field_config.get("searchable", False)
+        field_filterable = field_config.get("filterable", False)
+        field_sortable = field_config.get("sortable", False)
+        field_key = field_config.get("key", False)
+        field_vector = field_config.get("vector", False)
+
+        if field_searchable and field_type == "string":
+            field = SearchableField(
+                name=field_name,
+                type=SearchFieldDataType.String,
+                key=field_key,
+                filterable=field_filterable,
+                sortable=field_sortable,
+            )
+        else:
+            data_type = None
+            if field_type == "string":
+                data_type = SearchFieldDataType.String
+            elif field_type == "int":
+                data_type = SearchFieldDataType.Int32
+            elif field_type == "double":
+                data_type = SearchFieldDataType.Double
+            elif field_type == "boolean":
+                data_type = SearchFieldDataType.Boolean
+            elif field_type == "collection":
+                data_type = SearchFieldDataType.Collection(
+                    SearchFieldDataType.String
+                )
+
+            field = SimpleField(
+                name=field_name,
+                type=data_type,
+                key=field_key,
+                filterable=field_filterable,
+                sortable=field_sortable,
+            )
+
+        index_fields.append(field)
+
+        if field_vector:
+            vector_fields.append(field_name)
+
+    # Set up vector search configuration
+    algorithm_config = None
+    if algorithm_kind.lower() == "hnsw":
+        algorithm_config = HnswAlgorithmConfiguration(
+            name="hnsw-config",
+            parameters={"m": 4, "efConstruction": 400, "efSearch": 500},
+        )
+    else:
+        algorithm_config = ExhaustiveKnnAlgorithmConfiguration(
+            name="exhaustive-config"
+        )
+
+    # Create vector search configuration
+    vector_search = VectorSearch(
+        algorithms=[algorithm_config],
+        profiles=[
+            VectorSearchProfile(
+                name="vector-profile",
+                algorithm_configuration_name=algorithm_config.name,
+            )
+        ],
+    )
+
+    # Create the search index
+    index = SearchIndex(
+        name=index_name, fields=index_fields, vector_search=vector_search
+    )
+
+    try:
+        result = index_client.create_or_update_index(index)
+        return {
+            "index_name": result.name,
+            "vector_fields": vector_fields,
+            "vector_dimensions": vector_dimensions,
+            "algorithm": algorithm_kind,
+            "created": True,
+        }
+    except Exception as e:
+        return {"error": str(e), "created": False}
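A usage sketch for the new Azure Search tools, assuming `AZURE_SEARCH_ENDPOINT`, `AZURE_SEARCH_API_KEY`, and `AZURE_SEARCH_INDEX_NAME` are set in the environment and the target index already exists; the document fields are illustrative:

```python
from flock.core.tools.azure_tools import (
    azure_search_query,
    azure_search_upload_documents,
)

# Upload two illustrative documents (the index schema must define these fields).
upload_stats = azure_search_upload_documents(
    documents=[
        {"id": "1", "content": "Flock orchestrates declarative agents."},
        {"id": "2", "content": "Azure AI Search supports vector queries."},
    ]
)
print(upload_stats)  # e.g. {'succeeded': 2, 'failed': 0, 'total': 2}

# Keyword search; a vector search would additionally pass vector= and vector_field=.
for hit in azure_search_query(search_text="vector queries", top=5):
    print(hit.get("id"), hit.get("content"))
```

Since these are plain callables, they can presumably also be handed to an agent through its tools list like the other tool functions in the package.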
flock/core/tools/basic_tools.py
CHANGED
@@ -1,9 +1,10 @@
 """This module contains basic agentic tools for performing various tasks."""

 import importlib
+import json
 import os
 import re
-from typing import Literal
+from typing import Any, Literal

 from flock.core.interpreter.python_interpreter import PythonInterpreter
 from flock.core.logging.trace_and_logged import traced_and_logged
@@ -201,8 +202,6 @@ def extract_numbers(text: str) -> list[float]:

 @traced_and_logged
 def json_parse_safe(text: str) -> dict:
-    import json
-
     try:
         result = json.loads(text)
         return result
@@ -221,9 +220,98 @@ def save_to_file(content: str, filename: str):

 @traced_and_logged
 def read_from_file(filename: str) -> str:
+    with open(filename, encoding="utf-8") as file:
+        return file.read()
+
+
+@traced_and_logged
+def json_search(
+    json_file_path: str, search_query: str, case_sensitive: bool = False
+) -> list:
+    """Search a JSON file for objects containing the specified search query.
+
+    Args:
+        json_file_path (str): Path to the JSON file to search
+        search_query (str): Text to search for within the JSON objects
+        case_sensitive (bool, optional): Whether to perform a case-sensitive search. Defaults to False.
+
+    Returns:
+        list: List of JSON objects (as dicts) that contain the search query
+
+    Example:
+        >>> matching_tickets = json_search("tickets.json", "error 404")
+        >>> print(
+        ...     f"Found {len(matching_tickets)} tickets mentioning '404 error'"
+        ... )
+    """
     try:
-
-
-
-
+        # Read the JSON file
+        file_content = read_from_file(json_file_path)
+
+        # Parse the JSON content
+        json_data = json_parse_safe(file_content)
+
+        # Convert search query to lowercase if case-insensitive search
+        if not case_sensitive:
+            search_query = search_query.lower()
+
+        results = []
+
+        # Determine if the JSON root is an object or array
+        if isinstance(json_data, dict):
+            # Handle case where root is a dictionary object
+            for key, value in json_data.items():
+                if isinstance(value, list):
+                    # If this key contains a list of objects, search within them
+                    matching_items = _search_in_list(
+                        value, search_query, case_sensitive
+                    )
+                    results.extend(matching_items)
+                elif _contains_text(value, search_query, case_sensitive):
+                    # The entire object matches
+                    results.append(json_data)
+                    break
+        elif isinstance(json_data, list):
+            # Handle case where root is an array
+            matching_items = _search_in_list(
+                json_data, search_query, case_sensitive
+            )
+            results.extend(matching_items)
+
+        return results
+
+    except Exception as e:
+        return [{"error": f"Error searching JSON file: {e!s}"}]
+
+
+def _search_in_list(
+    items: list, search_query: str, case_sensitive: bool
+) -> list:
+    """Helper function to search for text in a list of items."""
+    matching_items = []
+    for item in items:
+        if _contains_text(item, search_query, case_sensitive):
+            matching_items.append(item)
+    return matching_items
+
+
+def _contains_text(obj: Any, search_query: str, case_sensitive: bool) -> bool:
+    """Recursively check if an object contains the search query in any of its string values."""
+    if isinstance(obj, str):
+        # For string values, check if they contain the search query
+        if case_sensitive:
+            return search_query in obj
+        else:
+            return search_query in obj.lower()
+    elif isinstance(obj, dict):
+        # For dictionaries, check each value
+        for value in obj.values():
+            if _contains_text(value, search_query, case_sensitive):
+                return True
+    elif isinstance(obj, list):
+        # For lists, check each item
+        for item in obj:
+            if _contains_text(item, search_query, case_sensitive):
+                return True
+    # For other types (numbers, booleans, None), return False
+    return False
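A quick usage sketch of the new `json_search` helper; `tickets.json` and its contents are made up for the example:

```python
import json

from flock.core.tools.basic_tools import json_search

# Create a small, made-up ticket file whose root is an array of objects.
with open("tickets.json", "w", encoding="utf-8") as f:
    json.dump(
        [
            {"id": 1, "title": "Login fails with Error 404"},
            {"id": 2, "title": "Dashboard loads slowly"},
        ],
        f,
    )

# Case-insensitive by default, so "error 404" matches "Error 404".
matches = json_search("tickets.json", "error 404")
print(f"Found {len(matches)} matching ticket(s)")  # Found 1 matching ticket(s)
```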
flock/modules/callback/callback_module.py
CHANGED
@@ -6,6 +6,7 @@ from typing import Any
 from pydantic import Field

 from flock.core import FlockModule, FlockModuleConfig
+from flock.core.context.context import FlockContext


 class CallbackModuleConfig(FlockModuleConfig):
@@ -44,13 +45,21 @@ class CallbackModule(FlockModule):
         description="Callback module configuration",
     )

-    async def pre_initialize(
+    async def pre_initialize(
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
+    ) -> None:
         """Run initialize callback if configured."""
         if self.config.initialize_callback:
             await self.config.initialize_callback(agent, inputs)

     async def pre_evaluate(
-        self,
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Run evaluate callback if configured."""
         if self.config.evaluate_callback:
@@ -58,14 +67,22 @@ class CallbackModule(FlockModule):
         return inputs

     async def pre_terminate(
-        self,
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Run terminate callback if configured."""
         if self.config.terminate_callback:
             await self.config.terminate_callback(agent, inputs, result)

     async def on_error(
-        self,
+        self,
+        agent: Any,
+        error: Exception,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Run error callback if configured."""
         if self.config.on_error_callback:
flock/modules/memory/memory_module.py
CHANGED
@@ -7,6 +7,7 @@ from pydantic import Field
 from tqdm import tqdm

 from flock.core import FlockAgent, FlockModule, FlockModuleConfig
+from flock.core.context.context import FlockContext
 from flock.core.logging.logging import get_logger
 from flock.modules.memory.memory_parser import MemoryMappingParser
 from flock.modules.memory.memory_storage import FlockMemoryStore, MemoryEntry
@@ -77,7 +78,10 @@ class MemoryModule(FlockModule):
     )

     async def initialize(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Initialize memory store if needed."""
         if not self.memory_store:
@@ -92,7 +96,10 @@ class MemoryModule(FlockModule):
         logger.debug(f"Initialized memory module for agent {agent.name}")

     async def pre_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Check memory before evaluation."""
         if not self.memory_store:
@@ -214,7 +221,11 @@ class MemoryModule(FlockModule):
             logger.warning(f"Memory storage failed: {e}", agent=agent.name)

     async def post_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Store results in memory after evaluation."""
         if not self.memory_store:
@@ -229,7 +240,11 @@ class MemoryModule(FlockModule):
         return result

     async def terminate(
-        self,
+        self,
+        agent: Any,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Save memory store if configured."""
         if self.config.save_after_update and self.memory_store:
flock/modules/output/output_module.py
CHANGED
@@ -8,6 +8,7 @@ from typing import Any
 from pydantic import Field

 from flock.core import FlockAgent
+from flock.core.context.context import FlockContext
 from flock.core.flock_module import FlockModule, FlockModuleConfig
 from flock.core.logging.formatters.themed_formatter import (
     ThemedAgentResultFormatter,
@@ -58,6 +59,10 @@ class OutputModuleConfig(FlockModuleConfig):
         default=False,
         description="Whether to suppress output",
     )
+    print_context: bool = Field(
+        default=False,
+        description="Whether to print the context",
+    )


 class OutputModule(FlockModule):
@@ -159,12 +164,18 @@ class OutputModule(FlockModule):
             json.dump(output_data, f, indent=2)

     async def post_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Format and display the output."""
         logger.debug("Formatting and displaying output")
         if self.config.no_output:
             return result
+        if self.config.print_context:
+            result["context"] = context
         # Display the result using the formatter
         self._formatter.display_result(result, agent.name)

flock/modules/performance/metrics_module.py
CHANGED
@@ -11,6 +11,7 @@ import numpy as np
 import psutil
 from pydantic import BaseModel, Field, validator

+from flock.core.context.context import FlockContext
 from flock.core.flock_agent import FlockAgent
 from flock.core.flock_module import FlockModule, FlockModuleConfig

@@ -222,7 +223,11 @@ class MetricsModule(FlockModule):
         return stats

     async def terminate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Clean up and final metric recording."""
         if self.config.storage_type == "json":
@@ -331,7 +336,10 @@ class MetricsModule(FlockModule):
         return False

     async def initialize(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Initialize metrics collection."""
         self._start_time = time.time()
@@ -370,7 +378,10 @@ class MetricsModule(FlockModule):
         return token_count, total_cost

     async def pre_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Record pre-evaluation metrics."""
         if self.config.collect_token_usage:
@@ -408,7 +419,11 @@ class MetricsModule(FlockModule):
         return inputs

     async def post_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Record post-evaluation metrics."""
         if self.config.collect_timing and self._start_time:
@@ -463,7 +478,11 @@ class MetricsModule(FlockModule):
         return result

     async def on_error(
-        self,
+        self,
+        agent: FlockAgent,
+        error: Exception,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> None:
         """Record error metrics."""
         self._record_metric(
flock/modules/zep/zep_module.py
CHANGED
@@ -5,6 +5,7 @@ from pydantic import Field
 from zep_python.client import Zep
 from zep_python.types import Message as ZepMessage, SessionSearchResult

+from flock.core.context.context import FlockContext
 from flock.core.flock_agent import FlockAgent
 from flock.core.flock_module import FlockModule, FlockModuleConfig
 from flock.core.logging.logging import get_logger
@@ -139,7 +140,11 @@ class ZepModule(FlockModule):
         return response.results

     async def post_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        result: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Format and display the output."""
         if not self.config.enable_write:
@@ -152,7 +157,10 @@ class ZepModule(FlockModule):
         return result

     async def pre_evaluate(
-        self,
+        self,
+        agent: FlockAgent,
+        inputs: dict[str, Any],
+        context: FlockContext | None = None,
     ) -> dict[str, Any]:
         """Format and display the output."""
         if not self.config.enable_read:
flock/workflow/activities.py
CHANGED
@@ -50,7 +50,7 @@ async def run_agent(context: FlockContext) -> dict:
         # Create a nested span for this iteration.
         with tracer.start_as_current_span("agent_iteration") as iter_span:
             iter_span.set_attribute("agent.name", agent.name)
-
+            agent.context = context
             # Resolve inputs for the agent.
             agent_inputs = resolve_inputs(
                 agent.input, context, previous_agent_name
@@ -160,7 +160,7 @@ async def run_agent(context: FlockContext) -> dict:
                 agent.name,
                 result,
                 timestamp=datetime.now().isoformat(),
-                hand_off=handoff_data,
+                hand_off=handoff_data.model_dump(),
                 called_from=previous_agent_name,
             )
             previous_agent_name = agent.name
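The second hunk stores `handoff_data.model_dump()` in the run record instead of the Pydantic object itself, which keeps the history JSON-friendly. A small illustration, where `HandOff` is a hypothetical stand-in for whatever type the router actually returns:

```python
from pydantic import BaseModel


class HandOff(BaseModel):
    """Hypothetical stand-in for the router's hand-off payload."""

    next_agent: str = ""
    override_inputs: dict = {}


handoff_data = HandOff(next_agent="summarizer")

# model_dump() turns the model into a plain dict, so the AgentRunRecord's
# hand_off field (typed dict | None in the new context module) stores cleanly.
print(handoff_data.model_dump())  # {'next_agent': 'summarizer', 'override_inputs': {}}
```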
{flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flock-core
-Version: 0.3.15
+Version: 0.3.17
 Summary: Declarative LLM Orchestration at Scale
 Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
 License-File: LICENSE
@@ -8,6 +8,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.10
+Requires-Dist: azure-search-documents>=11.5.2
 Requires-Dist: chromadb>=0.6.3
 Requires-Dist: cloudpickle>=3.1.1
 Requires-Dist: devtools>=0.12.2
@@ -29,6 +30,7 @@ Requires-Dist: prometheus-client>=0.21.1
 Requires-Dist: pydantic>=2.10.5
 Requires-Dist: python-box>=7.3.2
 Requires-Dist: python-decouple>=3.8
+Requires-Dist: python-dotenv>=1.0.1
 Requires-Dist: questionary>=2.1.0
 Requires-Dist: rich>=13.9.4
 Requires-Dist: sentence-transformers>=3.4.1
{flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/RECORD
CHANGED
@@ -8,16 +8,16 @@ flock/cli/load_examples.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7
 flock/cli/load_flock.py,sha256=3JdECvt5X7uyOG2vZS3-Zk5C5SI_84_QZjcsB3oJmfA,932
 flock/cli/load_release_notes.py,sha256=qFcgUrMddAE_TP6x1P-6ZywTUjTknfhTDW5LTxtg1yk,599
 flock/cli/settings.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7
-flock/cli/assets/release_notes.md,sha256
+flock/cli/assets/release_notes.md,sha256=bqnk50jxM3w5uY44Dc7MkdT8XmRREFxrVBAG9XCOSSU,4896
 flock/core/__init__.py,sha256=mPlvKc0SxC2qCvSlgYeP_7EyV8ptmdn24NO8mlQoCSo,559
 flock/core/flock.py,sha256=IURlcuNvdsnqKkvgXtX4v_pGWQ8Lfb60X--MT0zvxHo,19881
-flock/core/flock_agent.py,sha256=
+flock/core/flock_agent.py,sha256=QPyRSa1X_aAK2MSgqLNHBiL-_cnYHOSSnrFup2YTzss,12509
 flock/core/flock_api.py,sha256=2rHnmEdtT5KPZYwGesRT7LqwbrgKClODHT-O56u7pcQ,7140
 flock/core/flock_evaluator.py,sha256=j7riJj_KsWoBnKmLiGp-U0CRhxDyJbgEdLGN26tfKm8,1588
-flock/core/flock_factory.py,sha256=
-flock/core/flock_module.py,sha256=
+flock/core/flock_factory.py,sha256=7nV0WAh197INdBckJ-NhnhSSZOSzZC1MlYfeZm2x8Xc,2750
+flock/core/flock_module.py,sha256=96aFVYAgwpKN53xGbivQDUpikOYGFCxK5mqhclOcxY0,3003
 flock/core/flock_router.py,sha256=A5GaxcGvtiFlRLHBTW7okh5RDm3BdKam2uXvRHRaj7k,2187
-flock/core/context/context.py,sha256=
+flock/core/context/context.py,sha256=3sj5BrnY7OBG8Xk-uoT0yPKQjCzUU6a3VH26xuHo2KI,6407
 flock/core/context/context_manager.py,sha256=FANSWa6DEhdhtZ7t_9Gza0v80UdpoDOhHbfVOccmjkA,1181
 flock/core/context/context_vars.py,sha256=zYTMi9b6mNSSEHowEQUOTpEDurmAjaUcyBCgfKY6-cU,300
 flock/core/execution/local_executor.py,sha256=rnIQvaJOs6zZORUcR3vvyS6LPREDJTjaygl_Db0M8ao,952
@@ -40,7 +40,8 @@ flock/core/mixin/prompt_parser.py,sha256=eOqI-FK3y17gVqpc_y5GF-WmK1Jv8mFlkZxTcgw
 flock/core/registry/agent_registry.py,sha256=TUClh9e3eA6YzZC1CMTlsTPvQeqb9jYHewi-zPpcWM8,4987
 flock/core/serialization/secure_serializer.py,sha256=n5-zRvvXddgJv1FFHsaQ2wuYdL3WUSGPvG_LGaffEJo,6144
 flock/core/serialization/serializable.py,sha256=SymJ0YrjBx48mOBItYSqoRpKuzIc4vKWRS6ScTzre7s,2573
-flock/core/tools/
+flock/core/tools/azure_tools.py,sha256=9Bi6IrB5pzBTBhBSxpCVMgx8HBud8nl4gDp8aN0NT6c,17031
+flock/core/tools/basic_tools.py,sha256=hEG14jNZ2itVvubCHTfsWkuJK6yuNwBtuFj2Js0VHZs,9043
 flock/core/tools/llm_tools.py,sha256=Bdt4Dpur5dGpxd2KFEQyxjfZazvW1HCDKY6ydMj6UgQ,21811
 flock/core/tools/markdown_tools.py,sha256=W6fGM48yGHbifVlaOk1jOtVcybfRbRmf20VbDOZv8S4,6031
 flock/core/tools/dev_tools/github.py,sha256=a2OTPXS7kWOVA4zrZHynQDcsmEi4Pac5MfSjQOLePzA,5308
@@ -53,13 +54,13 @@ flock/evaluators/memory/memory_evaluator.py,sha256=SmerXyNaqm8DTV0yw-WqWkn9DXIf6
 flock/evaluators/natural_language/natural_language_evaluator.py,sha256=6nVEeh8_uwv_h-d3FWlA0GbzDzRtdhvxCGKirHtyvOU,2012
 flock/evaluators/zep/zep_evaluator.py,sha256=9NOELl7JAuUcx_FQrxY6b-_vN3MjwDyW7ZppPIGeCFc,1954
 flock/modules/azure-search/azure_search_module.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-flock/modules/callback/callback_module.py,sha256=
-flock/modules/memory/memory_module.py,sha256=
+flock/modules/callback/callback_module.py,sha256=volGGgHtY19qj1wHR6m5a_hmXSbV3Ca3uY6I76YmcfU,2833
+flock/modules/memory/memory_module.py,sha256=dZ30eOFqIlAz0a5IKJMoXgJ-VyPEqApAOX0OQjhGA1I,14733
 flock/modules/memory/memory_parser.py,sha256=FLH7GL8XThvHiCMfX3eQH7Sz-f62fzhAUmO6_gaDI7U,4372
 flock/modules/memory/memory_storage.py,sha256=CNcLDMmvv0x7Z3YMKr6VveS_VCa7rKPw8l2d-XgqokA,27246
-flock/modules/output/output_module.py,sha256=
-flock/modules/performance/metrics_module.py,sha256=
-flock/modules/zep/zep_module.py,sha256=
+flock/modules/output/output_module.py,sha256=MPs5QV5g5DZQGqR07dS0eBug_TGpkdjX5zTrcrDphq8,7467
+flock/modules/performance/metrics_module.py,sha256=UD9OjY4-zAvauMD7YyDYqE1gyIhzpdr3JkBT8j9knxY,16790
+flock/modules/zep/zep_module.py,sha256=x7JG6O6xnwwum0RETIqKYbA3xzdcvX2aUuns0Cl0c2Q,6014
 flock/platform/docker_tools.py,sha256=fpA7-6rJBjPOUBLdQP4ny2QPgJ_042nmqRn5GtKnoYw,1445
 flock/platform/jaeger_install.py,sha256=MyOMJQx4TQSMYvdUJxfiGSo3YCtsfkbNXcAcQ9bjETA,2898
 flock/routers/__init__.py,sha256=w9uL34Auuo26-q_EGlE8Z9iHsw6S8qutTAH_ZI7pn7M,39
@@ -407,12 +408,12 @@ flock/themes/zenburned.toml,sha256=UEmquBbcAO3Zj652XKUwCsNoC2iQSlIh-q5c6DH-7Kc,1
 flock/themes/zenwritten-dark.toml,sha256=To5l6520_3UqAGiEumpzGWsHhXxqu9ThrMildXKgIO0,1669
 flock/themes/zenwritten-light.toml,sha256=G1iEheCPfBNsMTGaVpEVpDzYBHA_T-MV27rolUYolmE,1666
 flock/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-flock/workflow/activities.py,sha256=
+flock/workflow/activities.py,sha256=yah-lHjMW6_Ww1gt7hMXBis1cJRlcbHx0uLsMB9oNZ0,9066
 flock/workflow/agent_activities.py,sha256=NhBZscflEf2IMfSRa_pBM_TRP7uVEF_O0ROvWZ33eDc,963
 flock/workflow/temporal_setup.py,sha256=VWBgmBgfTBjwM5ruS_dVpA5AVxx6EZ7oFPGw4j3m0l0,1091
 flock/workflow/workflow.py,sha256=I9MryXW_bqYVTHx-nl2epbTqeRy27CAWHHA7ZZA0nAk,1696
-flock_core-0.3.
-flock_core-0.3.
-flock_core-0.3.
-flock_core-0.3.
-flock_core-0.3.
+flock_core-0.3.17.dist-info/METADATA,sha256=UBhR7vepYychz81nFEj8gvsjukAXFX4kv25HKiCk2Bw,20584
+flock_core-0.3.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+flock_core-0.3.17.dist-info/entry_points.txt,sha256=rWaS5KSpkTmWySURGFZk6PhbJ87TmvcFQDi2uzjlagQ,37
+flock_core-0.3.17.dist-info/licenses/LICENSE,sha256=iYEqWy0wjULzM9GAERaybP4LBiPeu7Z1NEliLUdJKSc,1072
+flock_core-0.3.17.dist-info/RECORD,,
{flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/WHEEL (file without changes)
{flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/entry_points.txt (file without changes)
{flock_core-0.3.15.dist-info → flock_core-0.3.17.dist-info}/licenses/LICENSE (file without changes)