cognautic-cli 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognautic/__init__.py +7 -0
- cognautic/ai_engine.py +2213 -0
- cognautic/auto_continuation.py +196 -0
- cognautic/cli.py +1064 -0
- cognautic/config.py +245 -0
- cognautic/file_tagger.py +194 -0
- cognautic/memory.py +419 -0
- cognautic/provider_endpoints.py +424 -0
- cognautic/rules.py +246 -0
- cognautic/tools/__init__.py +19 -0
- cognautic/tools/base.py +59 -0
- cognautic/tools/code_analysis.py +391 -0
- cognautic/tools/command_runner.py +292 -0
- cognautic/tools/file_operations.py +394 -0
- cognautic/tools/registry.py +115 -0
- cognautic/tools/response_control.py +48 -0
- cognautic/tools/web_search.py +336 -0
- cognautic/utils.py +297 -0
- cognautic/websocket_server.py +485 -0
- cognautic_cli-1.1.1.dist-info/METADATA +604 -0
- cognautic_cli-1.1.1.dist-info/RECORD +25 -0
- cognautic_cli-1.1.1.dist-info/WHEEL +5 -0
- cognautic_cli-1.1.1.dist-info/entry_points.txt +2 -0
- cognautic_cli-1.1.1.dist-info/licenses/LICENSE +21 -0
- cognautic_cli-1.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,424 @@
|
|
|
1
|
+
"""
|
|
2
|
+
API Endpoints and configurations for all AI providers
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
# Provider API Endpoints and Model Information
|
|
6
|
+
PROVIDER_ENDPOINTS = {
|
|
7
|
+
"openai": {
|
|
8
|
+
"base_url": "https://api.openai.com/v1",
|
|
9
|
+
"chat_endpoint": "/chat/completions",
|
|
10
|
+
"models_endpoint": "/models",
|
|
11
|
+
"embeddings_endpoint": "/embeddings",
|
|
12
|
+
"audio_endpoint": "/audio",
|
|
13
|
+
"images_endpoint": "/images",
|
|
14
|
+
"files_endpoint": "/files",
|
|
15
|
+
"fine_tuning_endpoint": "/fine_tuning",
|
|
16
|
+
"moderations_endpoint": "/moderations",
|
|
17
|
+
"assistants_endpoint": "/assistants",
|
|
18
|
+
"threads_endpoint": "/threads",
|
|
19
|
+
"headers": {
|
|
20
|
+
"Authorization": "Bearer {api_key}",
|
|
21
|
+
"Content-Type": "application/json"
|
|
22
|
+
}
|
|
23
|
+
},
|
|
24
|
+
|
|
25
|
+
"anthropic": {
|
|
26
|
+
"base_url": "https://api.anthropic.com",
|
|
27
|
+
"chat_endpoint": "/v1/messages",
|
|
28
|
+
"models_endpoint": "/v1/models",
|
|
29
|
+
"headers": {
|
|
30
|
+
"x-api-key": "{api_key}",
|
|
31
|
+
"Content-Type": "application/json",
|
|
32
|
+
"anthropic-version": "2023-06-01"
|
|
33
|
+
}
|
|
34
|
+
},
|
|
35
|
+
|
|
36
|
+
"google": {
|
|
37
|
+
"base_url": "https://generativelanguage.googleapis.com",
|
|
38
|
+
"chat_endpoint": "/v1beta/models/{model}:generateContent",
|
|
39
|
+
"models_endpoint": "/v1beta/models",
|
|
40
|
+
"embeddings_endpoint": "/v1beta/models/{model}:embedContent",
|
|
41
|
+
"count_tokens_endpoint": "/v1beta/models/{model}:countTokens",
|
|
42
|
+
"headers": {
|
|
43
|
+
"Content-Type": "application/json"
|
|
44
|
+
},
|
|
45
|
+
"auth_param": "key={api_key}"
|
|
46
|
+
},
|
|
47
|
+
|
|
48
|
+
"together": {
|
|
49
|
+
"base_url": "https://api.together.xyz/v1",
|
|
50
|
+
"chat_endpoint": "/chat/completions",
|
|
51
|
+
"models_endpoint": "/models",
|
|
52
|
+
"completions_endpoint": "/completions",
|
|
53
|
+
"embeddings_endpoint": "/embeddings",
|
|
54
|
+
"images_endpoint": "/images/generations",
|
|
55
|
+
"headers": {
|
|
56
|
+
"Authorization": "Bearer {api_key}",
|
|
57
|
+
"Content-Type": "application/json"
|
|
58
|
+
}
|
|
59
|
+
},
|
|
60
|
+
|
|
61
|
+
"openrouter": {
|
|
62
|
+
"base_url": "https://openrouter.ai/api/v1",
|
|
63
|
+
"chat_endpoint": "/chat/completions",
|
|
64
|
+
"models_endpoint": "/models",
|
|
65
|
+
"auth_endpoint": "/auth/key",
|
|
66
|
+
"generation_endpoint": "/generation",
|
|
67
|
+
"headers": {
|
|
68
|
+
"Authorization": "Bearer {api_key}",
|
|
69
|
+
"Content-Type": "application/json",
|
|
70
|
+
"HTTP-Referer": "https://cognautic-cli.local",
|
|
71
|
+
"X-Title": "Cognautic CLI"
|
|
72
|
+
}
|
|
73
|
+
},
|
|
74
|
+
|
|
75
|
+
"huggingface": {
|
|
76
|
+
"base_url": "https://api-inference.huggingface.co",
|
|
77
|
+
"models_endpoint": "/models",
|
|
78
|
+
"inference_endpoint": "/models/{model}",
|
|
79
|
+
"headers": {
|
|
80
|
+
"Authorization": "Bearer {api_key}",
|
|
81
|
+
"Content-Type": "application/json"
|
|
82
|
+
}
|
|
83
|
+
},
|
|
84
|
+
|
|
85
|
+
"cohere": {
|
|
86
|
+
"base_url": "https://api.cohere.ai/v1",
|
|
87
|
+
"chat_endpoint": "/chat",
|
|
88
|
+
"generate_endpoint": "/generate",
|
|
89
|
+
"embed_endpoint": "/embed",
|
|
90
|
+
"classify_endpoint": "/classify",
|
|
91
|
+
"summarize_endpoint": "/summarize",
|
|
92
|
+
"rerank_endpoint": "/rerank",
|
|
93
|
+
"headers": {
|
|
94
|
+
"Authorization": "Bearer {api_key}",
|
|
95
|
+
"Content-Type": "application/json"
|
|
96
|
+
}
|
|
97
|
+
},
|
|
98
|
+
|
|
99
|
+
"replicate": {
|
|
100
|
+
"base_url": "https://api.replicate.com/v1",
|
|
101
|
+
"predictions_endpoint": "/predictions",
|
|
102
|
+
"models_endpoint": "/models",
|
|
103
|
+
"collections_endpoint": "/collections",
|
|
104
|
+
"headers": {
|
|
105
|
+
"Authorization": "Token {api_key}",
|
|
106
|
+
"Content-Type": "application/json"
|
|
107
|
+
}
|
|
108
|
+
},
|
|
109
|
+
|
|
110
|
+
"perplexity": {
|
|
111
|
+
"base_url": "https://api.perplexity.ai",
|
|
112
|
+
"chat_endpoint": "/chat/completions",
|
|
113
|
+
"headers": {
|
|
114
|
+
"Authorization": "Bearer {api_key}",
|
|
115
|
+
"Content-Type": "application/json"
|
|
116
|
+
}
|
|
117
|
+
},
|
|
118
|
+
|
|
119
|
+
"mistral": {
|
|
120
|
+
"base_url": "https://api.mistral.ai/v1",
|
|
121
|
+
"chat_endpoint": "/chat/completions",
|
|
122
|
+
"models_endpoint": "/models",
|
|
123
|
+
"embeddings_endpoint": "/embeddings",
|
|
124
|
+
"headers": {
|
|
125
|
+
"Authorization": "Bearer {api_key}",
|
|
126
|
+
"Content-Type": "application/json"
|
|
127
|
+
}
|
|
128
|
+
},
|
|
129
|
+
|
|
130
|
+
"groq": {
|
|
131
|
+
"base_url": "https://api.groq.com/openai/v1",
|
|
132
|
+
"chat_endpoint": "/chat/completions",
|
|
133
|
+
"models_endpoint": "/models",
|
|
134
|
+
"audio_endpoint": "/audio",
|
|
135
|
+
"headers": {
|
|
136
|
+
"Authorization": "Bearer {api_key}",
|
|
137
|
+
"Content-Type": "application/json"
|
|
138
|
+
}
|
|
139
|
+
},
|
|
140
|
+
|
|
141
|
+
"deepseek": {
|
|
142
|
+
"base_url": "https://api.deepseek.com/v1",
|
|
143
|
+
"chat_endpoint": "/chat/completions",
|
|
144
|
+
"models_endpoint": "/models",
|
|
145
|
+
"headers": {
|
|
146
|
+
"Authorization": "Bearer {api_key}",
|
|
147
|
+
"Content-Type": "application/json"
|
|
148
|
+
}
|
|
149
|
+
},
|
|
150
|
+
|
|
151
|
+
"fireworks": {
|
|
152
|
+
"base_url": "https://api.fireworks.ai/inference/v1",
|
|
153
|
+
"chat_endpoint": "/chat/completions",
|
|
154
|
+
"completions_endpoint": "/completions",
|
|
155
|
+
"embeddings_endpoint": "/embeddings",
|
|
156
|
+
"models_endpoint": "/models",
|
|
157
|
+
"headers": {
|
|
158
|
+
"Authorization": "Bearer {api_key}",
|
|
159
|
+
"Content-Type": "application/json"
|
|
160
|
+
}
|
|
161
|
+
},
|
|
162
|
+
|
|
163
|
+
"anyscale": {
|
|
164
|
+
"base_url": "https://api.endpoints.anyscale.com/v1",
|
|
165
|
+
"chat_endpoint": "/chat/completions",
|
|
166
|
+
"models_endpoint": "/models",
|
|
167
|
+
"headers": {
|
|
168
|
+
"Authorization": "Bearer {api_key}",
|
|
169
|
+
"Content-Type": "application/json"
|
|
170
|
+
}
|
|
171
|
+
},
|
|
172
|
+
|
|
173
|
+
"ai21": {
|
|
174
|
+
"base_url": "https://api.ai21.com/studio/v1",
|
|
175
|
+
"complete_endpoint": "/complete",
|
|
176
|
+
"chat_endpoint": "/chat/completions",
|
|
177
|
+
"tokenize_endpoint": "/tokenize",
|
|
178
|
+
"headers": {
|
|
179
|
+
"Authorization": "Bearer {api_key}",
|
|
180
|
+
"Content-Type": "application/json"
|
|
181
|
+
}
|
|
182
|
+
},
|
|
183
|
+
|
|
184
|
+
"palm": {
|
|
185
|
+
"base_url": "https://generativelanguage.googleapis.com/v1beta",
|
|
186
|
+
"generate_endpoint": "/models/{model}:generateText",
|
|
187
|
+
"chat_endpoint": "/models/{model}:generateMessage",
|
|
188
|
+
"embed_endpoint": "/models/{model}:embedText",
|
|
189
|
+
"models_endpoint": "/models",
|
|
190
|
+
"headers": {
|
|
191
|
+
"Content-Type": "application/json"
|
|
192
|
+
},
|
|
193
|
+
"auth_param": "key={api_key}"
|
|
194
|
+
},
|
|
195
|
+
|
|
196
|
+
"claude": {
|
|
197
|
+
"base_url": "https://api.anthropic.com/v1",
|
|
198
|
+
"messages_endpoint": "/messages",
|
|
199
|
+
"complete_endpoint": "/complete",
|
|
200
|
+
"headers": {
|
|
201
|
+
"x-api-key": "{api_key}",
|
|
202
|
+
"Content-Type": "application/json",
|
|
203
|
+
"anthropic-version": "2023-06-01"
|
|
204
|
+
}
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
# Generic HTTP client for making API requests
|
|
209
|
+
import aiohttp
|
|
210
|
+
import json
|
|
211
|
+
from typing import Dict, Any, Optional
|
|
212
|
+
|
|
213
|
+
class GenericAPIClient:
    """Provider-agnostic HTTP client driven by PROVIDER_ENDPOINTS.

    Looks up the endpoint table for ``provider_name`` and derives URLs,
    auth headers and request payloads from it, so a single client
    implementation can talk to every configured provider.
    """

    def __init__(self, provider_name: str, api_key: str):
        self.provider_name = provider_name
        self.api_key = api_key
        # Unknown providers get an empty config; every lookup below
        # degrades gracefully to empty defaults in that case.
        self.config = PROVIDER_ENDPOINTS.get(provider_name, {})

    def get_headers(self) -> Dict[str, str]:
        """Return the provider's header template with the API key filled in.

        Only string values containing the ``{api_key}`` placeholder are
        substituted; everything else passes through untouched.
        """
        template = self.config.get("headers", {})
        return {
            name: (value.format(api_key=self.api_key)
                   if isinstance(value, str) and "{api_key}" in value
                   else value)
            for name, value in template.items()
        }

    def get_url(self, endpoint_key: str, **kwargs) -> str:
        """Build the full URL for *endpoint_key*.

        Keyword arguments (e.g. ``model=...``) are substituted into the
        endpoint template.  Providers that authenticate through a query
        parameter (Google/PaLM) get it appended here as well.
        """
        endpoint = self.config.get(endpoint_key, "")
        if kwargs:
            endpoint = endpoint.format(**kwargs)
        url = self.config.get("base_url", "") + endpoint

        if "auth_param" in self.config:
            credential = self.config["auth_param"].format(api_key=self.api_key)
            joiner = "&" if "?" in url else "?"
            url = f"{url}{joiner}{credential}"
        return url

    async def make_request(self, endpoint_key: str, method: str = "POST",
                           data: Optional[Dict] = None, **url_kwargs) -> Dict[str, Any]:
        """Send one HTTP request and decode the response.

        JSON responses are returned as parsed dicts; any other content
        type is wrapped as ``{"text": ..., "status": ...}``.
        """
        request_args = {
            "method": method,
            "url": self.get_url(endpoint_key, **url_kwargs),
            "headers": self.get_headers(),
            "json": data if data else None,
        }
        async with aiohttp.ClientSession() as session:
            async with session.request(**request_args) as response:
                if response.content_type != 'application/json':
                    body = await response.text()
                    return {"text": body, "status": response.status}
                return await response.json()

    async def chat_completion(self, messages: list, model: str, **kwargs) -> Dict[str, Any]:
        """Run a chat completion, translating to the provider's wire format.

        Supported kwargs: ``temperature``, ``max_tokens``, ``stream``
        (stream only for OpenAI-compatible providers).
        """
        temperature = kwargs.get("temperature", 0.7)

        if self.provider_name == "google":
            # Gemini expects a "contents" list with explicit user/model roles.
            contents = []
            for msg in messages:
                role = "user" if msg["role"] == "user" else "model"
                contents.append({"parts": [{"text": msg["content"]}], "role": role})
            payload = {
                "contents": contents,
                "generationConfig": {
                    "temperature": temperature,
                    "maxOutputTokens": kwargs.get("max_tokens", 8192)
                }
            }
            return await self.make_request("chat_endpoint", data=payload, model=model)

        if self.provider_name == "anthropic":
            # Claude Messages API: max_tokens is a required field.
            payload = {
                "model": model,
                "messages": messages,
                "max_tokens": kwargs.get("max_tokens", 4096),
                "temperature": temperature
            }
            return await self.make_request("chat_endpoint", data=payload)

        # Everything else speaks the OpenAI-compatible dialect.
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": kwargs.get("max_tokens", 4096),
            "temperature": temperature,
            "stream": kwargs.get("stream", False)
        }
        return await self.make_request("chat_endpoint", data=payload)

    async def list_models(self) -> Dict[str, Any]:
        """Fetch the provider's model listing."""
        return await self.make_request("models_endpoint", method="GET")
|
|
312
|
+
|
|
313
|
+
def get_provider_config(provider_name: str) -> Dict[str, Any]:
    """Look up the endpoint configuration for one provider.

    Returns an empty dict when the provider is unknown.
    """
    try:
        return PROVIDER_ENDPOINTS[provider_name]
    except KeyError:
        return {}
|
|
316
|
+
|
|
317
|
+
def get_all_providers() -> list:
    """Return the names of every configured provider."""
    return [name for name in PROVIDER_ENDPOINTS]
|
|
320
|
+
|
|
321
|
+
def get_provider_endpoints(provider_name: str) -> Dict[str, str]:
    """Return only the ``*_endpoint`` entries of a provider's config.

    Unknown providers yield an empty mapping.
    """
    config = PROVIDER_ENDPOINTS.get(provider_name, {})
    endpoints = {}
    for key, value in config.items():
        if key.endswith("_endpoint"):
            endpoints[key] = value
    return endpoints
|
|
325
|
+
|
|
326
|
+
# Popular models for each provider
# Static, curated snapshot used for suggestions/defaults — it is NOT the
# authoritative list (use GenericAPIClient.list_models for live data).
# NOTE(review): model names go stale quickly; several entries below are
# dated snapshots — verify against each provider's current catalog.
PROVIDER_MODELS = {
    "openai": [
        "gpt-4o",
        "gpt-4o-mini",
        "gpt-4-turbo",
        "gpt-4",
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-16k"
    ],
    "anthropic": [
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307"
    ],
    "google": [
        "gemini-2.0-flash-exp",
        "gemini-exp-1206",
        "gemini-2.0-flash-thinking-exp-1219",
        "gemini-exp-1121",
        "gemini-1.5-pro",
        "gemini-1.5-flash",
        "gemini-1.5-flash-8b"
    ],
    "together": [
        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "Qwen/Qwen2.5-72B-Instruct-Turbo",
        "deepseek-ai/deepseek-llm-67b-chat"
    ],
    "openrouter": [
        "anthropic/claude-3.5-sonnet",
        "openai/gpt-4-turbo",
        "google/gemini-pro-1.5",
        "meta-llama/llama-3.1-405b-instruct",
        "mistralai/mixtral-8x22b-instruct"
    ],
    "groq": [
        "llama-3.3-70b-versatile",
        "llama-3.1-70b-versatile",
        "llama-3.1-8b-instant",
        "mixtral-8x7b-32768",
        "gemma2-9b-it"
    ],
    "mistral": [
        "mistral-large-latest",
        "mistral-medium-latest",
        "mistral-small-latest",
        "open-mistral-7b",
        "open-mixtral-8x7b",
        "open-mixtral-8x22b"
    ],
    "deepseek": [
        "deepseek-chat",
        "deepseek-coder"
    ],
    "perplexity": [
        "llama-3.1-sonar-small-128k-online",
        "llama-3.1-sonar-large-128k-online",
        "llama-3.1-sonar-huge-128k-online"
    ],
    "cohere": [
        "command-r-plus",
        "command-r",
        "command",
        "command-light"
    ],
    "fireworks": [
        "accounts/fireworks/models/llama-v3p1-405b-instruct",
        "accounts/fireworks/models/llama-v3p1-70b-instruct",
        "accounts/fireworks/models/mixtral-8x7b-instruct"
    ],
    "huggingface": [
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "google/gemma-7b-it"
    ],
    "replicate": [
        "meta/llama-2-70b-chat",
        "mistralai/mixtral-8x7b-instruct-v0.1"
    ],
    "anyscale": [
        "meta-llama/Meta-Llama-3.1-70B-Instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1"
    ],
    "ai21": [
        "jamba-1.5-large",
        "jamba-1.5-mini"
    ]
    # NOTE(review): "palm" and "claude" appear in PROVIDER_ENDPOINTS but
    # have no model list here — get_provider_models returns [] for them.
}
|
|
421
|
+
|
|
422
|
+
def get_provider_models(provider_name: str) -> list:
    """Return the curated model list for a provider (empty if unknown)."""
    try:
        return PROVIDER_MODELS[provider_name]
    except KeyError:
        return []
|
cognautic/rules.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Rules management for Cognautic CLI
|
|
3
|
+
Handles workspace-specific and global rules for AI behavior
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Dict, List, Optional, Any
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.table import Table
|
|
12
|
+
from rich.panel import Panel
|
|
13
|
+
|
|
14
|
+
console = Console()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class RulesManager:
    """Manages workspace and global rules for AI behavior.

    Global rules persist in ``~/.cognautic/global_rules.json``; workspace
    rules persist in ``.cognautic_rules.json`` inside the workspace
    directory.  Each rule is a dict with ``rule``, ``description`` and
    ``type`` keys.  Load/save errors are reported to the console and
    degrade to empty rule lists rather than raising.
    """

    def __init__(self):
        self.config_dir = Path.home() / ".cognautic"
        self.global_rules_file = self.config_dir / "global_rules.json"
        # NOTE(review): cache is never read or written elsewhere in this
        # class — kept for interface compatibility; confirm external users.
        self.workspace_rules_cache = {}

        # parents=True so a missing intermediate directory cannot make
        # construction fail (exist_ok alone only tolerates the leaf).
        self.config_dir.mkdir(parents=True, exist_ok=True)

        # Initialize global rules file on first run
        if not self.global_rules_file.exists():
            self._save_global_rules([])

    def _get_workspace_rules_file(self, workspace_path: Optional[str] = None) -> Path:
        """Return the rules file path for *workspace_path* (default: cwd)."""
        if workspace_path:
            workspace = Path(workspace_path).resolve()
        else:
            workspace = Path.cwd()

        return workspace / ".cognautic_rules.json"

    def _load_global_rules(self) -> List[Dict[str, Any]]:
        """Load global rules; returns [] when missing or unreadable."""
        try:
            if self.global_rules_file.exists():
                # Explicit encoding: JSON should not depend on the locale.
                with open(self.global_rules_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return []
        except Exception as e:
            console.print(f"⚠️ Error loading global rules: {e}", style="yellow")
            return []

    def _save_global_rules(self, rules: List[Dict[str, Any]]):
        """Save global rules to file; errors are reported, not raised."""
        try:
            with open(self.global_rules_file, 'w', encoding='utf-8') as f:
                json.dump(rules, f, indent=2)
        except Exception as e:
            console.print(f"❌ Error saving global rules: {e}", style="red")

    def _load_workspace_rules(self, workspace_path: Optional[str] = None) -> List[Dict[str, Any]]:
        """Load workspace-specific rules; returns [] when missing or unreadable."""
        rules_file = self._get_workspace_rules_file(workspace_path)

        try:
            if rules_file.exists():
                with open(rules_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return []
        except Exception as e:
            console.print(f"⚠️ Error loading workspace rules: {e}", style="yellow")
            return []

    def _save_workspace_rules(self, rules: List[Dict[str, Any]], workspace_path: Optional[str] = None):
        """Save workspace-specific rules; errors are reported, not raised."""
        rules_file = self._get_workspace_rules_file(workspace_path)

        try:
            with open(rules_file, 'w', encoding='utf-8') as f:
                json.dump(rules, f, indent=2)
            console.print(f"✅ Workspace rules saved to: {rules_file}", style="green")
        except Exception as e:
            console.print(f"❌ Error saving workspace rules: {e}", style="red")

    @staticmethod
    def _rule_exists(rules: List[Dict[str, Any]], rule: str) -> bool:
        """True when *rule* text already appears in *rules*."""
        return any(existing.get('rule') == rule for existing in rules)

    def add_global_rule(self, rule: str, description: str = "") -> bool:
        """Add a global rule; returns False on duplicate."""
        rules = self._load_global_rules()

        if self._rule_exists(rules, rule):
            console.print("⚠️ This rule already exists in global rules", style="yellow")
            return False

        rules.append({
            'rule': rule,
            'description': description,
            'type': 'global'
        })

        self._save_global_rules(rules)
        console.print("✅ Global rule added successfully", style="green")
        return True

    def add_workspace_rule(self, rule: str, description: str = "", workspace_path: Optional[str] = None) -> bool:
        """Add a workspace-specific rule; returns False on duplicate."""
        rules = self._load_workspace_rules(workspace_path)

        if self._rule_exists(rules, rule):
            console.print("⚠️ This rule already exists in workspace rules", style="yellow")
            return False

        rules.append({
            'rule': rule,
            'description': description,
            'type': 'workspace'
        })

        self._save_workspace_rules(rules, workspace_path)
        console.print("✅ Workspace rule added successfully", style="green")
        return True

    def remove_global_rule(self, rule_index: int) -> bool:
        """Remove a global rule by index; returns False on bad index."""
        rules = self._load_global_rules()

        if 0 <= rule_index < len(rules):
            removed = rules.pop(rule_index)
            self._save_global_rules(rules)
            console.print(f"✅ Removed global rule: {removed['rule']}", style="green")
            return True
        else:
            console.print("❌ Invalid rule index", style="red")
            return False

    def remove_workspace_rule(self, rule_index: int, workspace_path: Optional[str] = None) -> bool:
        """Remove a workspace rule by index; returns False on bad index."""
        rules = self._load_workspace_rules(workspace_path)

        if 0 <= rule_index < len(rules):
            removed = rules.pop(rule_index)
            self._save_workspace_rules(rules, workspace_path)
            console.print(f"✅ Removed workspace rule: {removed['rule']}", style="green")
            return True
        else:
            console.print("❌ Invalid rule index", style="red")
            return False

    def list_global_rules(self) -> List[Dict[str, Any]]:
        """List all global rules."""
        return self._load_global_rules()

    def list_workspace_rules(self, workspace_path: Optional[str] = None) -> List[Dict[str, Any]]:
        """List all workspace rules."""
        return self._load_workspace_rules(workspace_path)

    def get_all_rules(self, workspace_path: Optional[str] = None) -> Dict[str, List[Dict[str, Any]]]:
        """Get both global and workspace rules, keyed by scope."""
        return {
            'global': self._load_global_rules(),
            'workspace': self._load_workspace_rules(workspace_path)
        }

    @staticmethod
    def _format_rule_lines(rules: List[Dict[str, Any]]) -> List[str]:
        """Render one rule list as numbered text lines (for the AI context)."""
        lines = []
        for i, rule in enumerate(rules, 1):
            lines.append(f"{i}. {rule['rule']}")
            if rule.get('description'):
                lines.append(f"   Description: {rule['description']}")
        return lines

    def get_rules_for_ai(self, workspace_path: Optional[str] = None) -> str:
        """Get formatted rules text for injection into the AI context.

        Global rules come first, then workspace rules, separated by a
        blank line; returns "" when no rules are defined.
        """
        all_rules = self.get_all_rules(workspace_path)

        rules_text: List[str] = []

        if all_rules['global']:
            rules_text.append("# Global Rules")
            rules_text.extend(self._format_rule_lines(all_rules['global']))

        if all_rules['workspace']:
            if rules_text:
                rules_text.append("")
            rules_text.append("# Workspace Rules")
            rules_text.extend(self._format_rule_lines(all_rules['workspace']))

        return "\n".join(rules_text) if rules_text else ""

    @staticmethod
    def _render_rules_table(rules: List[Dict[str, Any]]):
        """Print one rule list as a numbered rich table (shared by both scopes)."""
        table = Table(show_header=True, header_style="bold cyan")
        table.add_column("#", style="dim", width=4)
        table.add_column("Rule", style="white")
        table.add_column("Description", style="dim")

        for i, rule in enumerate(rules):
            table.add_row(
                str(i),
                rule['rule'],
                rule.get('description', '')
            )

        console.print(table)

    def display_rules(self, workspace_path: Optional[str] = None):
        """Display global then workspace rules as formatted tables."""
        all_rules = self.get_all_rules(workspace_path)

        # Global rules section
        if all_rules['global']:
            console.print(Panel.fit("🌍 Global Rules", style="bold blue"))
            self._render_rules_table(all_rules['global'])
            console.print()
        else:
            console.print("📝 No global rules defined", style="dim")
            console.print()

        # Workspace rules section
        if all_rules['workspace']:
            workspace_display = workspace_path or os.getcwd()
            console.print(Panel.fit(f"📁 Workspace Rules ({workspace_display})", style="bold green"))
            self._render_rules_table(all_rules['workspace'])
        else:
            console.print("📝 No workspace rules defined for current workspace", style="dim")

    def clear_global_rules(self) -> bool:
        """Clear all global rules."""
        self._save_global_rules([])
        console.print("✅ All global rules cleared", style="green")
        return True

    def clear_workspace_rules(self, workspace_path: Optional[str] = None) -> bool:
        """Clear all workspace rules."""
        self._save_workspace_rules([], workspace_path)
        console.print("✅ All workspace rules cleared", style="green")
        return True
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tool system for Cognautic CLI
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from .registry import ToolRegistry
|
|
6
|
+
from .file_operations import FileOperationsTool
|
|
7
|
+
from .command_runner import CommandRunnerTool
|
|
8
|
+
from .web_search import WebSearchTool
|
|
9
|
+
from .code_analysis import CodeAnalysisTool
|
|
10
|
+
from .response_control import ResponseControlTool
|
|
11
|
+
|
|
12
|
+
# Public API of the tools package: these are the names exported by
# ``from cognautic.tools import *``.
__all__ = [
    'ToolRegistry',
    'FileOperationsTool',
    'CommandRunnerTool',
    'WebSearchTool',
    'CodeAnalysisTool',
    'ResponseControlTool'
]
|