naive_knowledge_base-0.1.0-py3-none-any.whl

api_models.py ADDED
@@ -0,0 +1,390 @@
+ from copy import deepcopy
+ from typing import Dict, List, Optional, Union, Any, ClassVar
+ import requests
+ from smolagents import ApiModel, ChatMessage, MessageRole
+ from smolagents.tools import Tool
+ from openai.types.chat import ChatCompletion
+ import os
+ from dotenv import load_dotenv
+ load_dotenv()
+
+ def _request_access_token() -> str:
+     """Request an access token from the Flow API."""
+     response = requests.post(
+         "https://flow.ciandt.com/auth-engine-api/v1/api-key/token",
+         headers={"Content-Type": "application/json", "FlowTenant": os.getenv("FLOW_TENANT", "flowteam")},
+         json={"clientId": os.getenv("FLOW_CLIENT_ID"), "clientSecret": os.getenv("FLOW_CLIENT_SECRET"), "appToAccess": "llm-api"},
+     )
+     # Surface auth failures immediately instead of passing None downstream
+     response.raise_for_status()
+     return response.json().get("access_token")
+
+ _access_token = _request_access_token()
+
+ class ModelConfig:
+     """Configuration for a specific model type"""
+     def __init__(
+         self,
+         api_url: str,
+         response_handler: str,
+         default_params: Dict[str, Any],
+         requires_anthropic_format: bool = False,
+         requires_system_message_separation: bool = False,
+     ):
+         self.api_url = api_url
+         self.response_handler = response_handler  # 'claude' or 'openai'
+         self.default_params = default_params
+         self.requires_anthropic_format = requires_anthropic_format
+         self.requires_system_message_separation = requires_system_message_separation
+
+
+ class FlowApiModel(ApiModel):
+     """
+     A unified API model wrapper for Flow-based models (Claude, GPT, DeepSeek),
+     providing a simple interface to interact with different model types.
+     """
+
+     # Registry of model-specific configurations
+     MODEL_REGISTRY: ClassVar[Dict[str, ModelConfig]] = {
+         # Claude models
+         "anthropic.claude-37-sonnet": ModelConfig(
+             api_url="https://flow.ciandt.com/ai-orchestration-api/v1/bedrock/invoke",
+             response_handler="claude",
+             default_params={
+                 "anthropic_version": "bedrock-2023-05-31",
+                 "max_tokens": 20000,
+                 "temperature": 0.5,
+                 "top_p": 0.9,
+                 "top_k": 250,
+             },
+             requires_anthropic_format=True,
+             requires_system_message_separation=True,
+         ),
+         # DeepSeek models
+         "DeepSeek-R1": ModelConfig(
+             api_url="https://flow.ciandt.com/ai-orchestration-api/v1/foundry/chat/completions",
+             response_handler="openai",
+             default_params={
+                 "max_tokens": 8192,
+             },
+         ),
+         # GPT models
+         "o3-mini": ModelConfig(
+             api_url="https://flow.ciandt.com/ai-orchestration-api/v1/openai/chat/completions",
+             response_handler="openai",
+             default_params={
+                 "max_tokens": 8192,
+                 "top_p": 1.0,
+                 "reasoning_effort": "medium",
+                 "presence_penalty": 0,
+                 "frequency_penalty": 0,
+             },
+         ),
+         "gpt-4o": ModelConfig(
+             api_url="https://flow.ciandt.com/ai-orchestration-api/v1/openai/chat/completions",
+             response_handler="openai",
+             default_params={
+                 "max_tokens": 8192,
+                 "temperature": 0.7,
+                 "top_p": 1.0,
+                 "presence_penalty": 0,
+                 "frequency_penalty": 0,
+             },
+         ),
+         "gpt-4.1": ModelConfig(
+             api_url="https://flow.ciandt.com/ai-orchestration-api/v1/openai/chat/completions",
+             response_handler="openai",
+             default_params={
+                 "max_tokens": 8192,
+                 "temperature": 0.7,
+                 "top_p": 1.0,
+                 "presence_penalty": 0,
+                 "frequency_penalty": 0,
+             },
+         ),
+         "o1-mini": ModelConfig(
+             api_url="https://flow.ciandt.com/ai-orchestration-api/v1/openai/chat/completions",
+             response_handler="openai",
+             default_params={
+                 "reasoning_effort": "medium",
+                 "top_p": 1.0,
+                 "presence_penalty": 0,
+                 "frequency_penalty": 0,
+             },
+         ),
+     }
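+
+     # Additional entries can be registered at runtime; hypothetical example,
+     # sketch only (the URL and defaults must match an actual Flow deployment):
+     #
+     #     FlowApiModel.MODEL_REGISTRY["my-model"] = ModelConfig(
+     #         api_url="https://flow.ciandt.com/ai-orchestration-api/v1/openai/chat/completions",
+     #         response_handler="openai",
+     #         default_params={"max_tokens": 4096, "temperature": 0.7},
+     #     )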
+
+     @classmethod
+     def get_model_config(cls, model_id: str) -> ModelConfig:
+         """Get configuration for a specific model ID"""
+         if model_id not in cls.MODEL_REGISTRY:
+             raise ValueError(f"Unsupported model: {model_id}. Available models: {list(cls.MODEL_REGISTRY.keys())}")
+         return cls.MODEL_REGISTRY[model_id]
+
+     @classmethod
+     def create(cls, model_id: str, **kwargs) -> 'FlowApiModel':
+         """Factory method to create the appropriate model instance"""
+         return cls(model_id=model_id, **kwargs)
+
+     def __init__(
+         self,
+         model_id: str,
+         api_key: str = _access_token,
+         flow_tenant: str = os.getenv("FLOW_TENANT", "flowteam"),
+         flow_agent: str = "feature_refinement_agent",
+         flow_operation_id: str = "default_operation",
+         **kwargs,
+     ):
+         super().__init__(model_id=model_id, **kwargs)
+
+         # Get model-specific configuration
+         self.config = self.get_model_config(model_id)
+
+         # Common attributes for all models
+         self.api_key = api_key
+         self.flow_tenant = flow_tenant
+         self.flow_agent = flow_agent
+         self.flow_operation_id = flow_operation_id
+         self.model_name = model_id  # For backward compatibility
+
+         # Load model-specific default parameters, letting kwargs override them
+         for key, value in self.config.default_params.items():
+             setattr(self, key, kwargs.get(key, value))
+
+         # Store additional kwargs
+         self.kwargs = kwargs
+
+         # Use the model-specific API URL unless one is passed explicitly
+         self.api_url = kwargs.get("api_url", self.config.api_url)
+
+     def create_client(self):
+         """No additional client setup is required for Flow models."""
+         return None
+
+     def _create_headers(self) -> dict:
+         """Creates the headers for the API request."""
+         headers = {
+             "Content-Type": "application/json",
+             "FlowTenant": self.flow_tenant,
+             "FlowAgent": self.flow_agent,
+             "Authorization": f"Bearer {self.api_key}",
+         }
+
+         # flow_operation_id is always set in __init__, so this header is normally
+         # present; the hasattr guard only protects subclasses that remove it
+         if hasattr(self, "flow_operation_id"):
+             headers["FlowOperationId"] = self.flow_operation_id
+
+         return headers
+
+     def _claude_get_tool_json_schema(self, tool: Tool) -> dict:
+         """Convert a Tool object to the JSON schema format expected by the API"""
+         properties = deepcopy(tool.inputs)
+         required = []
+         for key, value in properties.items():
+             if value["type"] == "any":
+                 value["type"] = "string"
+             if not ("nullable" in value and value["nullable"]):
+                 required.append(key)
+         return {
+             "name": tool.name,
+             "type": "function",
+             "description": tool.description,
+             "input_schema": {
+                 "type": "object",
+                 "properties": properties,
+                 "required": required,
+             },
+         }
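+
+     # For a hypothetical tool with a single required "query" string input, the
+     # schema above comes out roughly as (sketch):
+     #
+     #     {"name": "search", "type": "function", "description": "...",
+     #      "input_schema": {"type": "object",
+     #                       "properties": {"query": {"type": "string", ...}},
+     #                       "required": ["query"]}}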
+
+     def _prepare_payload(
+         self,
+         messages: List[Dict[str, str]],
+         stop_sequences: Optional[List[str]] = None,
+         grammar: Optional[str] = None,
+         tools_to_call_from: Optional[List[Tool]] = None,
+         custom_role_conversions: Optional[Dict[str, str]] = None,
+         convert_images_to_image_urls: bool = False,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """Prepare the payload for the API request based on model type"""
+         # Handle the system message separately for models that require it
+         system_message = None
+         if self.config.requires_system_message_separation:
+             system_message = next((message for message in messages if message["role"] == "system"), None)
+             messages = [message for message in messages if message["role"] != "system"]
+
+         # Prepare completion kwargs with base model params
+         completion_kwargs = self._prepare_completion_kwargs(
+             messages=messages,
+             stop_sequences=stop_sequences,
+             grammar=grammar,
+             tools_to_call_from=tools_to_call_from,
+             custom_role_conversions=custom_role_conversions,
+             convert_images_to_image_urls=convert_images_to_image_urls,
+             **kwargs,
+         )
+
+         # Add model-specific parameters, preferring per-instance overrides
+         model_params = {}
+         for key, default_value in self.config.default_params.items():
+             model_params[key] = getattr(self, key, default_value)
+
+         # Restrict the request to the configured model
+         model_params.update({
+             "allowedModels": [self.model_id],
+         })
+
+         # OpenAI-style endpoints also expect the model name in the body
+         if self.config.response_handler != "claude":
+             model_params["model"] = self.model_id
+
+         # Reasoning models (o3-mini, o1-mini) reject max_tokens, so drop it
+         if self.model_id in ("o3-mini", "o1-mini"):
+             model_params.pop("max_tokens", None)
+
+         # Update the completion kwargs with model params
+         completion_kwargs.update(model_params)
+
+         # Remove tool_choice from the payload if it exists
+         completion_kwargs.pop("tool_choice", None)
+
+         # The Bedrock invoke endpoint used for Claude does not accept the
+         # OpenAI-style `stop` parameter, so drop it
+         if self.config.response_handler == "claude" and "stop" in completion_kwargs:
+             completion_kwargs.pop("stop", None)
+
+         # Add tools in Anthropic format if provided
+         if tools_to_call_from and self.config.requires_anthropic_format:
+             completion_kwargs["tools"] = [self._claude_get_tool_json_schema(tool) for tool in tools_to_call_from]
+
+         # Add the system message back as a top-level field if it was separated
+         if system_message and self.config.requires_system_message_separation:
+             completion_kwargs["system"] = system_message["content"]
+
+         return completion_kwargs
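+
+     # Rough shape of a Claude payload produced above (illustrative sketch only):
+     #
+     #     {"anthropic_version": "bedrock-2023-05-31", "max_tokens": 20000,
+     #      "temperature": 0.5, "top_p": 0.9, "top_k": 250,
+     #      "allowedModels": ["anthropic.claude-37-sonnet"],
+     #      "system": "...", "messages": [{"role": "user", "content": "..."}]}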
+
+     def _handle_response(self, response: requests.Response) -> ChatMessage:
+         """Handle the API response based on model type"""
+         if response.status_code != 200:
+             raise ValueError(
+                 f"API call failed with status code {response.status_code}: {response.text}"
+             )
+
+         response_data = response.json()
+
+         # Update token counts; Claude reports input_tokens/output_tokens while
+         # OpenAI-style responses report prompt_tokens/completion_tokens
+         usage = response_data.get("usage") or {}
+         self._last_input_token_count = usage.get("input_tokens", usage.get("prompt_tokens", 0))
+         self._last_output_token_count = usage.get("output_tokens", usage.get("completion_tokens", 0))
+
+         if self.config.response_handler == "claude":
+             # Claude-style response handling: pull the first text block
+             content_list = response_data.get("content", [])
+             if not content_list:
+                 raise ValueError("API response contains no content.")
+
+             first_message_content = next(
+                 (item["text"] for item in content_list if item.get("type") == "text"), None
+             )
+
+             if first_message_content is None:
+                 raise ValueError("No valid text content found in the API response.")
+
+             return ChatMessage(
+                 role=MessageRole.ASSISTANT,
+                 content=first_message_content,
+                 raw=response_data,
+             )
+         else:
+             # OpenAI-style response handling: validate against the OpenAI schema
+             chat_completion = ChatCompletion.model_validate(response_data)
+
+             return ChatMessage.from_dict(
+                 chat_completion.choices[0].message.model_dump(include={"role", "content", "tool_calls"}),
+                 raw=response_data,
+             )
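+
+     # Claude-style body handled above, for reference (sketch):
+     #
+     #     {"content": [{"type": "text", "text": "..."}],
+     #      "usage": {"input_tokens": 12, "output_tokens": 345}}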
+
+     def generate(
+         self,
+         messages: List[Dict[str, Union[str, List[Dict]]]],
+         stop_sequences: Optional[List[str]] = None,
+         grammar: Optional[str] = None,
+         tools_to_call_from: Optional[List[Tool]] = None,
+         custom_role_conversions: Optional[Dict[str, str]] = None,
+         convert_images_to_image_urls: bool = False,
+         **kwargs,
+     ) -> ChatMessage:
+         """Process the input messages and return the model's response.
+
+         Parameters:
+             messages (`list[dict[str, str]]`):
+                 A list of message dictionaries to be processed. Each dictionary should have the structure `{"role": "user/system", "content": "message content"}`.
+             stop_sequences (`List[str]`, *optional*):
+                 A list of strings that will stop the generation if encountered in the model's output.
+             grammar (`str`, *optional*):
+                 The grammar or formatting structure to use in the model's response.
+             tools_to_call_from (`List[Tool]`, *optional*):
+                 A list of tools that the model can use to generate responses.
+             custom_role_conversions (`Dict[str, str]`, *optional*):
+                 A dictionary mapping custom role names to standard role names.
+             convert_images_to_image_urls (`bool`, *optional*):
+                 Whether to convert images in messages to URLs.
+             **kwargs:
+                 Additional keyword arguments to be passed to the underlying model.
+
+         Returns:
+             `ChatMessage`: A chat message object containing the model's response.
+         """
+         # Prepare the payload
+         payload = self._prepare_payload(
+             messages=messages,
+             stop_sequences=stop_sequences,
+             grammar=grammar,
+             tools_to_call_from=tools_to_call_from,
+             custom_role_conversions=custom_role_conversions,
+             convert_images_to_image_urls=convert_images_to_image_urls,
+             **kwargs,
+         )
+
+         # Create headers for the API request
+         headers = self._create_headers()
+
+         # Send the request to the API
+         response = requests.post(self.api_url, headers=headers, json=payload, timeout=300)
+
+         # Handle the response
+         return self._handle_response(response)
+
+     def __call__(
+         self,
+         messages: List[Dict[str, str]],
+         stop_sequences: Optional[List[str]] = None,
+         grammar: Optional[str] = None,
+         tools_to_call_from: Optional[List[Tool]] = None,
+         custom_role_conversions: Optional[Dict[str, str]] = None,
+         convert_images_to_image_urls: bool = False,
+         **kwargs,
+     ) -> ChatMessage:
+         """
+         Sends a request to the Flow API and returns the response.
+         This method is an alias for generate() for backward compatibility.
+         """
+         return self.generate(
+             messages=messages,
+             stop_sequences=stop_sequences,
+             grammar=grammar,
+             tools_to_call_from=tools_to_call_from,
+             custom_role_conversions=custom_role_conversions,
+             convert_images_to_image_urls=convert_images_to_image_urls,
+             **kwargs,
+         )
cli.py ADDED
@@ -0,0 +1,203 @@
+ """Command-line interface for naive_knowledge_base."""
+
+ import sys
+ import argparse
+ from pathlib import Path
+ import os
+ import dotenv
+
+ # Define version locally
+ __version__ = "0.1.0"
+
+ # Add the current directory to the path to allow local imports
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+ # Import dependencies
+ try:
+     from smolagents import CodeAgent, ToolCallingAgent
+     from agents.dependency_graph import generate_dependency_graph
+     from api_models import FlowApiModel
+     from tools import (
+         save_content_to_file,
+         read_file_content,
+         delete_folder_or_file,
+         generate_folder_tree,
+     )
+ except ImportError as e:
+     print(f"Error importing dependencies: {e}", file=sys.stderr)
+     print("Please install dependencies: pip install -r requirements.txt", file=sys.stderr)
+     sys.exit(1)
+
+
+ def run_analysis(source_directory: str, file_extensions: str = "java",
+                  ignore_dirs: str = "target,.git,test,node_modules,Pods") -> str:
+     """
+     Run dependency graph analysis on a source directory.
+
+     Args:
+         source_directory: Path to the source code directory to analyze
+         file_extensions: File extensions to analyze (default: "java")
+         ignore_dirs: Comma-separated list of directories to ignore
+
+     Returns:
+         str: Analysis results
+     """
+     dotenv.load_dotenv()
+
+     dependency_graph_agent = ToolCallingAgent(
+         tools=[generate_dependency_graph],
+         planning_interval=1,
+         final_answer_checks=[],
+         model=FlowApiModel(
+             model_id="gpt-4.1",
+             temperature=0.4,
+         ),
+         max_steps=7,
+         name="dependency_graph_agent",
+         description="Generates a dependency graph for the given source directory.",
+     )
+
+     manager_agent = CodeAgent(
+         managed_agents=[dependency_graph_agent],
+         model=FlowApiModel(model_id="gpt-4.1"),
+         tools=[
+             read_file_content,
+             save_content_to_file,
+             delete_folder_or_file,
+             generate_folder_tree,
+         ],
+         additional_authorized_imports=[
+             "os",
+             "json",
+             "pandas",
+             "numpy",
+             "pathlib",
+             "ast",
+             "re",
+             "networkx",
+             "collections",
+         ],
+         planning_interval=3,
+         max_steps=30,
+         name="tech_lead_agent",
+         description="This agent is responsible for building and analyzing dependency graphs from source code.",
+     )
+
+     result = manager_agent.run(
+         f"Generate the tree and dependency graph of the {file_extensions} files in the source: "
+         f"{source_directory}. Make sure to ignore the {ignore_dirs} directories."
+     )
+
+     return result
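+
+
+ # The same analysis can be run programmatically (sketch; assumes the FLOW_*
+ # credentials used by the CLI are present in the environment or a .env file):
+ #
+ #     from cli import run_analysis
+ #     report = run_analysis("/path/to/project", file_extensions="java")
+ #     print(report)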
92
+
93
+
94
+ def create_parser():
95
+ """Create the argument parser."""
96
+ parser = argparse.ArgumentParser(
97
+ prog="naive-kb",
98
+ description="Analyze code dependencies and generate dependency graphs using AI",
99
+ formatter_class=argparse.RawDescriptionHelpFormatter,
100
+ epilog="""
101
+ Examples:
102
+ # Analyze a Java project
103
+ naive-kb /path/to/project
104
+
105
+ # Analyze with specific file extensions
106
+ naive-kb /path/to/project --extensions python
107
+
108
+ # Ignore specific directories
109
+ naive-kb /path/to/project --ignore "target,build,.git,node_modules"
110
+
111
+ # Full example
112
+ naive-kb /path/to/project --extensions java --ignore "target,.git,test"
113
+ """
114
+ )
115
+
116
+ parser.add_argument(
117
+ "source_directory",
118
+ type=str,
119
+ help="Path to the source code directory to analyze"
120
+ )
121
+
122
+ parser.add_argument(
123
+ "-e", "--extensions",
124
+ type=str,
125
+ default="java",
126
+ help="File extensions to analyze (default: java)"
127
+ )
128
+
129
+ parser.add_argument(
130
+ "-i", "--ignore",
131
+ type=str,
132
+ default="target,.git,test,node_modules,Pods",
133
+ help="Comma-separated list of directories to ignore (default: target,.git,test,node_modules,Pods)"
134
+ )
135
+
136
+ parser.add_argument(
137
+ "-v", "--version",
138
+ action="version",
139
+ version=f"%(prog)s {__version__}"
140
+ )
141
+
142
+ parser.add_argument(
143
+ "--env-file",
144
+ type=str,
145
+ help="Path to .env file (default: .env in current directory)"
146
+ )
147
+
148
+ return parser
+
+
+ def main():
+     """Main CLI entry point."""
+     parser = create_parser()
+     args = parser.parse_args()
+
+     # Load environment variables
+     if args.env_file:
+         dotenv.load_dotenv(args.env_file)
+     else:
+         dotenv.load_dotenv()
+
+     # Validate the source directory
+     source_path = Path(args.source_directory)
+     if not source_path.exists():
+         print(f"Error: Source directory '{args.source_directory}' does not exist.", file=sys.stderr)
+         sys.exit(1)
+
+     if not source_path.is_dir():
+         print(f"Error: '{args.source_directory}' is not a directory.", file=sys.stderr)
+         sys.exit(1)
+
+     print(f"Analyzing source directory: {args.source_directory}")
+     print(f"File extensions: {args.extensions}")
+     print(f"Ignoring directories: {args.ignore}")
+     print("\nRunning analysis...")
+
+     try:
+         result = run_analysis(
+             source_directory=args.source_directory,
+             file_extensions=args.extensions,
+             ignore_dirs=args.ignore,
+         )
+
+         print("\n" + "=" * 80)
+         print("Analysis Complete!")
+         print("=" * 80)
+         print(result)
+
+         return 0
+
+     except KeyboardInterrupt:
+         print("\n\nAnalysis interrupted by user.", file=sys.stderr)
+         return 130
+
+     except Exception as e:
+         print(f"\nError running analysis: {e}", file=sys.stderr)
+         import traceback
+         traceback.print_exc()
+         return 1
+
+
+ if __name__ == "__main__":
+     sys.exit(main())
main.py ADDED
@@ -0,0 +1,68 @@
+ from smolagents import CodeAgent, ToolCallingAgent
+ from api_models import FlowApiModel
+ from tools import save_content_to_file, read_file_content, delete_folder_or_file, generate_folder_tree
+ import dotenv
+ import sys
+ from agents.dependency_graph import generate_dependency_graph
+ dotenv.load_dotenv()
+
+ model = FlowApiModel(
+     model_id="gpt-4.1",
+     temperature=0.5,
+ )
+
+ dependency_graph_agent = ToolCallingAgent(
+     tools=[
+         generate_dependency_graph,
+     ],
+     planning_interval=1,
+     final_answer_checks=[],
+     model=FlowApiModel(
+         model_id="gpt-4.1",
+         temperature=0.4,
+     ),
+     max_steps=7,
+     name="dependency_graph_agent",
+     description="Generates a dependency graph for the given source directory.",
+ )
+
+ manager_agent = CodeAgent(
+     managed_agents=[dependency_graph_agent],
+     model=FlowApiModel(
+         model_id="gpt-4.1",
+     ),
+     tools=[read_file_content, save_content_to_file, delete_folder_or_file, generate_folder_tree],
+     additional_authorized_imports=[
+         "os",
+         "json",
+         "pandas",
+         "numpy",
+         "pathlib",
+         "ast",
+         "re",
+         "networkx",
+         "collections",
+     ],
+     planning_interval=3,
+     max_steps=30,
+     name="tech_lead_agent",
+     description="This agent is responsible for building and analyzing dependency graphs from source code. It analyzes code files, extracts dependencies, and creates comprehensive dependency graphs with metrics and relationships.",
+ )
+
+ if __name__ == "__main__":
+     if len(sys.argv) < 2:
+         print("Usage: python main.py <source_directory> [file_extensions] [ignore_dirs]", file=sys.stderr)
+         sys.exit(1)
+
+     source_directory = sys.argv[1]
+     file_extensions = sys.argv[2] if len(sys.argv) > 2 else "java"
+     ignore_dirs = sys.argv[3] if len(sys.argv) > 3 else "target,.git,test,node_modules,Pods"
+     print("Running main agent...")
+     try:
+         result = manager_agent.run(f"""
+ Tasks:
+ 1. Generate the tree and dependency graph of the {file_extensions} files in the source: {source_directory}. Make sure to ignore the {ignore_dirs} directories.
+ """.strip())
+         print(result)
+     except Exception as e:
+         print(f"Error running main agent: {e}", file=sys.stderr)
+         sys.exit(1)