nexusforge-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2 @@
1
"""NexusForge CLI — Enterprise AI Agent Orchestration Setup."""

# Package version; keep in sync with the wheel METADATA and click's
# version_option in main.py.
__version__ = "1.0.0"
@@ -0,0 +1,210 @@
1
+ """Agent experience profiles — accumulated knowledge from NexusForge agents."""
2
+
3
# Static experience profiles for the agents the CLI can display
# (`nexusforge agents`). Schema per entry:
#   model             — Ollama tag or cloud provider name the agent runs on
#   role              — one-line human description (clipped to 35 chars in the table)
#   strengths         — headline capabilities
#   best_for          — typical workloads
#   experience        — run statistics: total_executions (int), success_rate
#                       (0..1 fraction), avg_latency_ms (int), and distilled
#                       free-text "learned_patterns"
# NOTE(review): only 10 of the platform's agents are profiled here; the
# routing map below lists more — presumably the rest have no recorded
# experience yet. Confirm before documenting a count anywhere.
AGENT_PROFILES = {
    "ClassifierAgent": {
        "model": "gemma3:4b",
        "role": "Classifies text into categories (ticket type, urgency, intent)",
        "strengths": ["Fast classification", "Multi-label support", "Urgency detection"],
        "best_for": ["Ticket triage", "Email routing", "Document categorization"],
        "experience": {
            "total_executions": 150,
            "success_rate": 0.97,
            "avg_latency_ms": 800,
            "learned_patterns": [
                "Urgency keywords: 'urgente', 'critico', 'inmediato' → high priority",
                "Invoice patterns: montos, fechas, RFC → type: factura",
                "Complaint patterns: 'queja', 'problema', 'no funciona' → type: complaint",
            ],
        },
    },
    "ExtractorAgent": {
        "model": "qwen2.5-coder:7b",
        "role": "Extracts structured data from unstructured text (entities, fields, values)",
        "strengths": ["JSON output", "Multi-field extraction", "Table parsing"],
        "best_for": ["Invoice processing", "Form extraction", "Data normalization"],
        "experience": {
            "total_executions": 120,
            "success_rate": 0.94,
            "avg_latency_ms": 1200,
            "learned_patterns": [
                "Invoice fields: monto, fecha, proveedor, RFC, numero_factura",
                "Email fields: from, subject, body_summary, intent, urgency",
                "Nested JSON extraction works better with explicit schema in prompt",
            ],
        },
    },
    "SummarizerAgent": {
        "model": "llama3.1:8b",
        "role": "Generates concise summaries of documents and conversations",
        "strengths": ["Natural language", "Multi-document", "Executive summaries"],
        "best_for": ["Report generation", "Email summaries", "Meeting notes"],
        "experience": {
            "total_executions": 100,
            "success_rate": 0.96,
            "avg_latency_ms": 1500,
            "learned_patterns": [
                "Executive summaries: 3-5 bullet points max",
                "Report summaries: include KPIs, trends, anomalies",
                "Multi-doc: synthesize common themes across documents",
            ],
        },
    },
    "AnalyzerAgent": {
        "model": "qwen2.5-coder:7b",
        "role": "Analyzes data for trends, anomalies, and patterns",
        "strengths": ["Trend detection", "Anomaly flagging", "Statistical analysis"],
        "best_for": ["Financial analysis", "Performance metrics", "Data quality"],
        "experience": {
            "total_executions": 80,
            "success_rate": 0.92,
            "avg_latency_ms": 1800,
            "learned_patterns": [
                "Financial: flag transactions > 2 std deviations from mean",
                "Trends: compare current period vs previous period",
                "Quality: check for nulls, duplicates, format inconsistencies",
            ],
        },
    },
    "SentimentAgent": {
        "model": "gemma3:4b",
        "role": "Analyzes emotional tone and urgency in text",
        "strengths": ["Fast sentiment", "Urgency scoring", "Tone detection"],
        "best_for": ["Customer feedback", "Ticket prioritization", "Social monitoring"],
        "experience": {
            "total_executions": 90,
            "success_rate": 0.95,
            "avg_latency_ms": 600,
            "learned_patterns": [
                "Urgency scale: critical (immediate), high (same day), medium (48h), low (week)",
                "Negative sentiment + high urgency = escalate immediately",
                "Sarcasm detection improves with context from previous messages",
            ],
        },
    },
    "ValidatorAgent": {
        "model": "qwen2.5-coder:7b",
        "role": "Validates data quality, consistency, and business rules",
        "strengths": ["Rule-based validation", "Duplicate detection", "Format checking"],
        "best_for": ["Invoice validation", "Data quality", "Compliance checks"],
        "experience": {
            "total_executions": 70,
            "success_rate": 0.98,
            "avg_latency_ms": 900,
            "learned_patterns": [
                "Duplicate detection: compare hash of key fields (monto+fecha+proveedor)",
                "Format validation: RFC pattern, email format, phone format",
                "Business rules: montos > threshold require approval flag",
            ],
        },
    },
    "ReporterAgent": {
        "model": "llama3.1:8b",
        "role": "Generates formatted reports from analyzed data",
        "strengths": ["Executive reports", "HTML/PDF formatting", "KPI dashboards"],
        "best_for": ["Weekly reports", "Financial summaries", "Status updates"],
        "experience": {
            "total_executions": 60,
            "success_rate": 0.93,
            "avg_latency_ms": 2000,
            "learned_patterns": [
                "Executive report structure: summary → KPIs → details → recommendations",
                "Include visual indicators: arrows for trends, colors for status",
                "Always include period comparison (vs previous week/month)",
            ],
        },
    },
    "ComplianceAgent": {
        # Cloud-only agent — routed to Claude rather than a local Ollama model.
        "model": "claude",
        "role": "Checks regulatory rules and flags violations",
        "strengths": ["Regulatory knowledge", "Risk assessment", "Audit trail"],
        "best_for": ["Financial compliance", "Data privacy", "Policy enforcement"],
        "experience": {
            "total_executions": 40,
            "success_rate": 0.99,
            "avg_latency_ms": 3000,
            "learned_patterns": [
                "Transactions > $10,000 require human approval (anti-money laundering)",
                "PII detection: mask SSN, credit cards, addresses in outputs",
                "Compliance reports must include timestamp, auditor, and decision rationale",
            ],
        },
    },
    "OCRAgent": {
        "model": "qwen2.5-coder:7b",
        "role": "Extracts text from images and scanned documents",
        "strengths": ["PDF parsing", "Image text extraction", "Table recognition"],
        "best_for": ["Invoice scanning", "Document digitization", "Receipt processing"],
        "experience": {
            "total_executions": 50,
            "success_rate": 0.88,
            "avg_latency_ms": 2500,
            "learned_patterns": [
                "PDF text extraction > image OCR for quality",
                "Tables: extract as structured JSON, not plain text",
                "Low-quality scans: preprocess with contrast enhancement",
            ],
        },
    },
    "JudgeAgent": {
        "model": "qwen2.5-coder:7b",
        "role": "Evaluates and scores outputs from multiple agents using Weighted Borda Count",
        "strengths": ["Consensus evaluation", "Multi-criteria scoring", "Conflict resolution"],
        "best_for": ["Second opinions", "Quality assurance", "Multi-agent voting"],
        "experience": {
            "total_executions": 30,
            "success_rate": 0.95,
            "avg_latency_ms": 1000,
            "learned_patterns": [
                "Weighted Borda Count works best with 3+ agent outputs",
                "Criteria weights: accuracy (0.4), completeness (0.3), relevance (0.3)",
                "Tie-breaking: prefer the agent with higher historical success rate",
            ],
        },
    },
}
165
+
166
# Model routing map
# Maps every agent class name to the model it should run on. Agents listed
# here but absent from AGENT_PROFILES have no recorded experience data yet.
AGENT_MODEL_MAP = {
    # Gemma3:4b — fast classification/routing
    "ClassifierAgent": "gemma3:4b",
    "RouterAgent": "gemma3:4b",
    "SentimentAgent": "gemma3:4b",
    "MonitorAgent": "gemma3:4b",
    # Qwen2.5-coder:7b — structured output/code
    "ExtractorAgent": "qwen2.5-coder:7b",
    "AnalyzerAgent": "qwen2.5-coder:7b",
    "ValidatorAgent": "qwen2.5-coder:7b",
    "NormalizerAgent": "qwen2.5-coder:7b",
    "OCRAgent": "qwen2.5-coder:7b",
    "JudgeAgent": "qwen2.5-coder:7b",
    "SchedulerAgent": "qwen2.5-coder:7b",
    "WebhookAgent": "qwen2.5-coder:7b",
    # Llama3.1:8b — natural language/synthesis
    "SummarizerAgent": "llama3.1:8b",
    "ReporterAgent": "llama3.1:8b",
    "TranslatorAgent": "llama3.1:8b",
    "EnricherAgent": "llama3.1:8b",
    "KnowledgeAgent": "llama3.1:8b",
    "ResearcherAgent": "llama3.1:8b",
    "ScraperAgent": "llama3.1:8b",
    # Cloud-only — values here are provider names, not Ollama tags
    "ComplianceAgent": "claude",
    "PlannerAgent": "groq",
    "CriticAgent": "groq",
}
195
+
196
# Swarm topologies
# Human-readable descriptions of the orchestration patterns the platform
# supports; keys are the topology identifiers used elsewhere.
TOPOLOGIES = {
    "sequential": "Pipeline ordered step by step — each agent depends on the previous",
    "parallel": "Multiple agents working simultaneously — for independent tasks",
    "hierarchical": "Supervisor coordinates sub-tasks — for complex orchestration",
    "debate": "Agents argue to improve results — for quality-critical analysis",
    "consensus": "Multiple agents vote on decisions — for high-stakes choices",
    "adaptive": "Selects topology automatically based on input complexity",
}
205
+
206
# Ordered provider fallback chain (index 0 tried first). Displayed by the
# `nexusforge models` command; each entry carries provider, model, role, cost.
LLM_FALLBACK_CHAIN = [
    {"provider": "ollama", "model": "deepseek-r1:8b", "role": "Chat + thinking", "cost": "free"},
    {"provider": "groq", "model": "llama-3.3-70b-versatile", "role": "Cloud fallback", "cost": "free"},
    {"provider": "anthropic", "model": "claude-sonnet-4", "role": "Last resort", "cost": "paid"},
]
@@ -0,0 +1,223 @@
1
+ """Kiro specs generator — creates steering, memory, and hooks for NexusForge projects."""
2
+
3
+ import os
4
+ import json
5
+
6
+ # ── Kiro Steering Rules ─────────────────────────────────────────────────────
7
+
8
# Markdown written verbatim to .kiro/steering/product.md by generate_kiro_specs.
STEERING_PRODUCT = """# Product Context — NexusForge AI

## What is this?
Enterprise-grade AI Agent Orchestration Platform with:
- 24 specialized AI agents with dedicated local LLMs
- Chat-first interface with visible thinking (deepseek-r1)
- 6 swarm topologies for complex workflows
- Google OAuth + Stripe billing
- 12 integrations (Email, Slack, Notion, Drive, Sheets, Gmail, Webhook, API)
- Orchestrator memory system (agents learn from experience)
- LLM fallback chain: local (Ollama) → Groq (free) → Claude (paid)

## Target Users
- Business operators automating repetitive processes
- Teams processing tickets, invoices, emails, reports
- Enterprises needing compliance + audit trail

## Key Principles
1. Users should NEVER see technical details (JSON, agent names, topologies)
2. The chat guides step by step — ONE question at a time
3. Everything runs locally first (privacy), cloud as fallback
4. Every agent execution is recorded as experience for future learning
"""
31
+
32
# Markdown written verbatim to .kiro/steering/tech.md by generate_kiro_specs.
STEERING_TECH = """# Tech Context — NexusForge AI

## Stack
- Backend: Python 3.12, FastAPI, asyncpg, asyncio
- Frontend: React 18, Vite 8, vanilla CSS (no Tailwind)
- Database: PostgreSQL 16 + pgvector, MongoDB 7 (episodic memory)
- Cache: Redis 7
- LLM: Ollama (5 models), Groq API, Claude API
- Embeddings: Voyage AI (512d) + nomic-embed-text (local)
- Deploy: Vercel (frontend), Render (backend)

## Architecture
- DAG-based workflow execution engine
- Per-agent model routing (gemma→routing, qwen→code, llama→language)
- 3-tier memory: working (Redis), episodic (MongoDB), semantic (pgvector)
- Circuit breaker pattern for agent health
- ExperienceCollector for agent learning
- OrchestratorMemory for system-wide knowledge

## Conventions
- English for code, Spanish for user-facing strings
- Pydantic v2 for all models
- Type hints on all functions
- Never modify existing encapsulated components — add new ones alongside
"""
57
+
58
# Markdown written verbatim to .kiro/steering/structure.md by generate_kiro_specs.
# NOTE(review): the directory tree below appears to have lost its leading
# indentation in the published package rendering — confirm against the
# original source before relying on the tree structure.
STEERING_STRUCTURE = """# Project Structure

```
backend/
app/
agents/ — 24 agent implementations (base.py, capabilities.py)
auth/ — JWT, OAuth, middleware, billing
config.py — Settings (Pydantic v2)
db/ — PostgreSQL client, migrations
engine/ — DAG execution, state machine, retry
healing/ — Circuit breaker, self-healing
integrations/ — Email, Slack, Notion, Drive, Gmail
llm/ — Router, providers (Ollama, Groq, Claude)
main.py — FastAPI app, middleware stack
memory/ — Manager, episodic, semantic, working, orchestrator
models/ — Pydantic schemas
rag/ — Embeddings, indexer, retriever
routes/ — API endpoints
swarms/ — Topologies (sequential, parallel, etc.)
tests/ — 260+ pytest tests

frontend/
src/
features/
chat-first/ — Main chat interface (ChatPanel, PreviewPanel)
chat/ — Floating guide chat (ChatAssistant)
dashboard/ — KPIs, metrics
automations/ — Automation management
wizard/ — Step-by-step wizard (legacy)
workflows/ — Workflow builder
shared/
components/ — Layout, Toast, ErrorBoundary
hooks/ — useLanguage, useToast, useRefreshOnFocus
i18n/ — Translations (es/en)
services/
api.js — fetchAPI, getApiUrl
```
"""
96
+
97
+ # ── Work Rules ───────────────────────────────────────────────────────────────
98
+
99
# Personal working agreement written to .kiro/steering/work-rules.md and
# shown by the `nexusforge rules` command.
# NOTE(review): this block publishes live Render service/database identifiers
# and deploy hostnames — confirm these are safe to ship in a public package.
WORK_RULES = """# Work Rules — Christian Hernandez

## Critical Rules (NEVER break these)
1. NEVER use PUT on Render env-vars API without including ALL existing vars
2. NEVER modify existing encapsulated components — add new ones alongside
3. ALWAYS verify destructive API calls before executing
4. ALWAYS update memory after mistakes and key decisions
5. Quality over speed — do it right, not fast
6. ALWAYS build locally and verify before pushing

## Development Flow
1. Read ALL affected files before changing anything
2. Plan the change mentally before writing code
3. Make the minimum change needed
4. Build locally to verify (vite build, pytest)
5. Commit with descriptive message
6. Push and verify deploy

## LLM Architecture
- deepseek-r1:8b → Chat interface (native thinking)
- qwen2.5-coder:7b → Code agents, JSON generation
- llama3.1:8b → Language agents, summaries, reports
- gemma3:4b → Fast classification, routing
- Groq → Cloud fallback (free, 24/7)
- Claude → Last resort (paid)

## Render/Vercel Deploy
- Service ID: srv-d75b2575r7bs73b22gp0
- DB: dpg-d75b1cpr0fns73blrtdg-a (nexusforge-db)
- Frontend: nexusforge-two.vercel.app (auto-deploy from master)
- Backend: nexusforge-api.onrender.com (auto-deploy from master)
- PUT /env-vars REPLACES ALL — always GET first, then PUT full list

## Agent Model Assignment
- Gemma3:4b → ClassifierAgent, RouterAgent, SentimentAgent, MonitorAgent
- Qwen2.5-coder:7b → ExtractorAgent, AnalyzerAgent, ValidatorAgent, OCRAgent, JudgeAgent
- Llama3.1:8b → SummarizerAgent, ReporterAgent, TranslatorAgent, ResearcherAgent
- Claude (cloud) → ComplianceAgent (regulatory, high-stakes)
- Groq (cloud) → PlannerAgent, CriticAgent (need large context)
"""
139
+
140
+ # ── Kiro Hooks ───────────────────────────────────────────────────────────────
141
+
142
# Hook definitions; each entry is serialized by generate_kiro_specs to
# .kiro/hooks/<name>.json. Keys: description, trigger ("pre-commit" or
# "on-save"), optional file glob "pattern", and the shell "command" to run.
KIRO_HOOKS = {
    "pre-commit-lint": {
        "description": "Run linting before commit",
        "trigger": "pre-commit",
        "command": "cd backend && python -m py_compile app/main.py",
    },
    "test-on-agent-change": {
        "description": "Run agent tests when agent files change",
        "trigger": "on-save",
        "pattern": "backend/app/agents/*.py",
        "command": "cd backend && pytest tests/test_agents.py -x -q",
    },
    "build-check-frontend": {
        "description": "Verify frontend builds on change",
        "trigger": "on-save",
        "pattern": "frontend/src/**/*.jsx",
        "command": "cd frontend && npx vite build --mode production",
    },
}
161
+
162
+ # ── Kiro Memory Templates ────────────────────────────────────────────────────
163
+
164
# Memory seed files: each key becomes .kiro/memory/<key>.md. The first three
# entries reuse the steering documents; the rest are inline markdown.
KIRO_MEMORY = {
    "product_context": STEERING_PRODUCT,
    "tech_context": STEERING_TECH,
    "architecture_context": STEERING_STRUCTURE,
    "known_risks": """# Known Risks
- Render PUT /env-vars replaces ALL variables (caused outage 2026-04-06)
- Ollama needs OLLAMA_ORIGINS=* and OLLAMA_HOST=0.0.0.0 for tunnel access
- deepseek-r1 thinking comes in message.thinking field, NOT <think> tags
- JSX files in React must use .jsx extension, not .js (Vite 8 strict)
- Frontend .env.local is NOT committed — env vars must be set in Vercel dashboard
- sessionStorage used for chat persistence (not localStorage to avoid cross-tab issues)
""",
    "recent_decisions": """# Recent Decisions
- Chat-first dashboard as default view (inspired by Hercules.app)
- Two chat roles: builder (deepseek-r1) and guide (always Groq)
- Preview panel shows step progress during automation creation
- System prompt instructs AI to be friendly, ONE question at a time, no JSON
- Publish confirmation triggers real workflow + automation creation via API
""",
    "active_workstream": """# Active Workstream
- Chat-first interface deployed and functional
- Thinking display shows compact real-time reasoning
- Chat history persists in sessionStorage
- Publish creates real automations via /wizard/generate + /workflows/ + /automations/
- Next: improve preview panel reactivity, add more quick actions
""",
}
191
+
192
+
193
def generate_kiro_specs(project_dir: str):
    """Generate Kiro steering, memory, and hooks for a NexusForge project.

    Creates ``<project_dir>/.kiro/{steering,memory,hooks}`` and writes:
      - steering/*.md from the STEERING_* / WORK_RULES constants
      - memory/<name>.md for every entry in KIRO_MEMORY
      - hooks/<name>.json for every entry in KIRO_HOOKS

    Returns the path to the generated ``.kiro`` directory.
    """
    kiro_dir = os.path.join(project_dir, ".kiro")
    steering_dir = os.path.join(kiro_dir, "steering")
    memory_dir = os.path.join(kiro_dir, "memory")
    hooks_dir = os.path.join(kiro_dir, "hooks")

    for directory in (steering_dir, memory_dir, hooks_dir):
        os.makedirs(directory, exist_ok=True)

    def _write_text(path, text):
        # UTF-8 is required: the steering docs contain arrows and em-dashes.
        with open(path, "w", encoding="utf-8") as fh:
            fh.write(text)

    # Steering files — fixed filename-to-constant mapping.
    steering_files = {
        "product.md": STEERING_PRODUCT,
        "tech.md": STEERING_TECH,
        "structure.md": STEERING_STRUCTURE,
        "work-rules.md": WORK_RULES,
    }
    for filename, text in steering_files.items():
        _write_text(os.path.join(steering_dir, filename), text)

    # Memory files — one markdown document per memory key.
    for name, content in KIRO_MEMORY.items():
        _write_text(os.path.join(memory_dir, f"{name}.md"), content)

    # Hooks — one pretty-printed JSON file per hook definition.
    for name, hook in KIRO_HOOKS.items():
        with open(os.path.join(hooks_dir, f"{name}.json"), "w", encoding="utf-8") as fh:
            json.dump(hook, fh, indent=2)

    return kiro_dir
nexusforge_cli/main.py ADDED
@@ -0,0 +1,342 @@
1
+ """NexusForge CLI — setup, configure, and manage your AI orchestration platform."""
2
+
3
+ import os
4
+ import sys
5
+ import json
6
+ import subprocess
7
+ import platform
8
+
9
+ import click
10
+ from rich.console import Console
11
+ from rich.panel import Panel
12
+ from rich.table import Table
13
+ from rich.progress import Progress, SpinnerColumn, TextColumn
14
+
15
# Shared Rich console used for all CLI output.
console = Console()

# Git remote that `nexusforge setup` clones/updates.
REPO_URL = "https://github.com/christianescamilla15-cell/nexusforge-ai.git"
# (ollama tag, human description, approximate download size); pulled in
# this order by `nexusforge setup`. Descriptions are intentionally Spanish
# (user-facing text convention).
OLLAMA_MODELS = [
    ("deepseek-r1:8b", "Chat inteligente (thinking nativo)", "4.9GB"),
    ("qwen2.5-coder:7b", "Agentes de codigo/JSON", "4.7GB"),
    ("llama3.1:8b", "Agentes de lenguaje", "4.9GB"),
    ("gemma3:4b", "Clasificacion/routing rapido", "3.3GB"),
    ("nomic-embed-text", "Embeddings RAG", "274MB"),
]
25
+
26
+
27
def run(cmd, check=False, capture=True):
    """Run *cmd* through the shell and report (success, output).

    Args:
        cmd: Shell command line to execute (10-minute timeout).
        check: When True, raise ``subprocess.CalledProcessError`` on a
            non-zero exit status instead of returning ``(False, ...)``.
            (Previously accepted but silently ignored.)
        capture: Capture stdout/stderr. When False the child inherits the
            parent's streams and the returned output is an empty string.

    Returns:
        Tuple ``(succeeded, stdout)`` with stdout stripped of surrounding
        whitespace. On timeout or OS error returns ``(False, message)``.
    """
    try:
        result = subprocess.run(cmd, shell=True, capture_output=capture, text=True, timeout=600)
        if check and result.returncode != 0:
            raise subprocess.CalledProcessError(result.returncode, cmd, result.stdout, result.stderr)
        # result.stdout is None when capture=False — the original crashed on
        # .strip() here (and the broad except turned success into failure).
        output = result.stdout.strip() if result.stdout else ""
        return result.returncode == 0, output
    except subprocess.CalledProcessError:
        # Honor an explicit check=True request; don't swallow it below.
        raise
    except Exception as e:
        return False, str(e)
34
+
35
+
36
@click.group()
@click.version_option(version="1.0.0")
def cli():
    """NexusForge AI — Enterprise Agent Orchestration Platform CLI."""
    # Root command group; subcommands register themselves via @cli.command().
    pass
41
+
42
+
43
@cli.command()
@click.option("--skip-models", is_flag=True, help="Skip downloading Ollama models")
@click.option("--skip-deps", is_flag=True, help="Skip installing project dependencies")
def setup(skip_models, skip_deps):
    """Full environment setup — installs everything from scratch.

    Six steps: prerequisite checks, Ollama env vars, model pulls, repo
    clone/update, dependency install, and Kiro spec generation.

    Fix over the original: filesystem paths interpolated into shell
    commands are now quoted, so a home directory containing spaces
    (e.g. "C:\\Users\\John Doe") no longer breaks the cd/clone steps.
    """
    console.print(Panel.fit(
        "[bold cyan]NexusForge AI — Full Environment Setup[/bold cyan]\n"
        "This will install Python deps, Node deps, Ollama, LLMs, and Kiro specs.",
        border_style="cyan",
    ))

    # 1. Check prerequisites — Python and Git are hard requirements; Node is soft.
    console.print("\n[bold yellow][1/6] Checking prerequisites...[/bold yellow]")

    ok, ver = run("python --version")
    if ok:
        console.print(f" [green]OK[/green] Python: {ver}")
    else:
        console.print(" [red]MISSING[/red] Python — install from python.org")
        return

    ok, ver = run("node --version")
    if ok:
        console.print(f" [green]OK[/green] Node.js: {ver}")
    else:
        console.print(" [yellow]WARN[/yellow] Node.js not found — frontend won't build")

    ok, ver = run("git --version")
    if ok:
        console.print(f" [green]OK[/green] Git: {ver}")
    else:
        console.print(" [red]MISSING[/red] Git — install from git-scm.com")
        return

    # 2. Ollama
    console.print("\n[bold yellow][2/6] Setting up Ollama...[/bold yellow]")
    ok, _ = run("ollama --version")
    if ok:
        console.print(" [green]OK[/green] Ollama installed")
    else:
        console.print(" [yellow]WARN[/yellow] Ollama not installed — download from ollama.com")
        console.print(" [dim]Local LLMs won't be available until Ollama is installed[/dim]")

    # Set env vars for tunnel support (setx persists them for future shells).
    if platform.system() == "Windows":
        os.system('setx OLLAMA_ORIGINS "*" >nul 2>&1')
        os.system('setx OLLAMA_HOST "0.0.0.0" >nul 2>&1')
        console.print(" [green]OK[/green] OLLAMA_ORIGINS=* and OLLAMA_HOST=0.0.0.0 set")

    # 3. Pull models
    if not skip_models:
        console.print("\n[bold yellow][3/6] Pulling AI models...[/bold yellow]")
        ok, _ = run("ollama --version")
        if ok:
            for model, desc, size in OLLAMA_MODELS:
                console.print(f" [dim]Pulling {model} ({size}) — {desc}[/dim]")
                run(f"ollama pull {model}")
                console.print(f" [green]OK[/green] {model}")
        else:
            console.print(" [yellow]SKIP[/yellow] Ollama not available")
    else:
        console.print("\n[bold yellow][3/6] Skipping model downloads[/bold yellow]")

    # 4. Clone/update repo
    console.print("\n[bold yellow][4/6] Setting up NexusForge project...[/bold yellow]")
    project_dir = os.path.join(os.path.expanduser("~"), "Desktop", "portafolio-completo", "proyectos", "07-nexusforge-ai")

    if os.path.exists(os.path.join(project_dir, ".git")):
        console.print(" [green]OK[/green] Repo exists — pulling latest")
        run(f'cd "{project_dir}" && git pull origin master')
    else:
        console.print(f" [dim]Cloning to {project_dir}...[/dim]")
        os.makedirs(os.path.dirname(project_dir), exist_ok=True)
        run(f'git clone {REPO_URL} "{project_dir}"')
        console.print(" [green]OK[/green] Repo cloned")

    # 5. Install dependencies
    if not skip_deps:
        console.print("\n[bold yellow][5/6] Installing dependencies...[/bold yellow]")
        backend_dir = os.path.join(project_dir, "backend")
        frontend_dir = os.path.join(project_dir, "frontend")

        if os.path.exists(os.path.join(backend_dir, "requirements.txt")):
            console.print(" [dim]Installing backend deps...[/dim]")
            run(f'cd "{backend_dir}" && pip install -r requirements.txt -q')
            console.print(" [green]OK[/green] Backend dependencies")

        if os.path.exists(os.path.join(frontend_dir, "package.json")):
            console.print(" [dim]Installing frontend deps...[/dim]")
            run(f'cd "{frontend_dir}" && npm install --silent')
            console.print(" [green]OK[/green] Frontend dependencies")
    else:
        console.print("\n[bold yellow][5/6] Skipping dependency install[/bold yellow]")

    # 6. Generate Kiro specs
    console.print("\n[bold yellow][6/6] Generating Kiro specs + agent memory...[/bold yellow]")
    from .kiro_specs import generate_kiro_specs
    kiro_dir = generate_kiro_specs(project_dir)
    console.print(f" [green]OK[/green] Kiro specs at {kiro_dir}")

    # Also init AIOS if available (best effort — silently skipped otherwise).
    ok, _ = run("aios --version")
    if ok:
        run(f'cd "{project_dir}" && aios init')
        console.print(" [green]OK[/green] AIOS initialized")

    # Summary
    console.print("\n")
    console.print(Panel.fit(
        "[bold green]Setup Complete![/bold green]\n\n"
        "[cyan]Installed:[/cyan]\n"
        " 5 local LLMs (deepseek-r1, qwen, llama, gemma, nomic)\n"
        " Kiro steering + memory + hooks\n"
        " Backend + frontend dependencies\n\n"
        "[cyan]Next steps:[/cyan]\n"
        " nexusforge status — Check everything is running\n"
        " nexusforge dev — Start dev environment\n"
        " nexusforge tunnel — Expose Ollama to Render\n"
        " nexusforge agents — View agent profiles\n",
        border_style="green",
    ))
164
+
165
+
166
@cli.command()
def status():
    """Check status of all NexusForge services."""
    console.print(Panel.fit("[bold cyan]NexusForge AI — System Status[/bold cyan]", border_style="cyan"))

    table = Table(show_header=True, header_style="bold")
    table.add_column("Service", style="cyan")
    table.add_column("Status")
    table.add_column("Details", style="dim")

    # Local toolchain checks (shell out and parse exit status).
    python_ok, python_ver = run("python --version")
    table.add_row(
        "Python",
        "[green]OK[/green]" if python_ok else "[red]MISSING[/red]",
        python_ver if python_ok else "Install python.org",
    )

    node_ok, node_ver = run("node --version")
    table.add_row(
        "Node.js",
        "[green]OK[/green]" if node_ok else "[yellow]WARN[/yellow]",
        node_ver if node_ok else "Install nodejs.org",
    )

    ollama_ok, _ = run("ollama --version")
    table.add_row("Ollama", "[green]OK[/green]" if ollama_ok else "[red]DOWN[/red]", "Local LLM server")

    # Count installed models only when Ollama itself responded.
    if ollama_ok:
        listed, tags = run("ollama list")
        if listed:
            model_lines = [ln for ln in tags.split("\n") if ln.strip() and not ln.startswith("NAME")]
            model_count = len(model_lines)
            table.add_row("Local Models", f"[green]{model_count}[/green]", f"{model_count} models loaded")

    # Cloud deployments — best-effort probes; any failure degrades to a warning row.
    try:
        import httpx
        resp = httpx.get("https://nexusforge-api.onrender.com/api/health", timeout=10)
        payload = resp.json()
        db = payload.get("components", {}).get("database", "?")
        table.add_row("Render Backend", "[green]OK[/green]", f"DB: {db}, Agents: {payload.get('agent_count', '?')}")
    except Exception:
        table.add_row("Render Backend", "[yellow]SLEEPING[/yellow]", "Free tier — wakes on request")

    try:
        import httpx
        resp = httpx.get("https://nexusforge-two.vercel.app/", timeout=10)
        frontend_status = "[green]OK[/green]" if resp.status_code == 200 else "[red]DOWN[/red]"
        table.add_row("Vercel Frontend", frontend_status, "nexusforge-two.vercel.app")
    except Exception:
        table.add_row("Vercel Frontend", "[yellow]UNKNOWN[/yellow]", "Could not check")

    console.print(table)
214
+
215
+
216
@cli.command()
def agents():
    """View all agent profiles with experience data."""
    from .agents import AGENT_PROFILES

    console.print(Panel.fit("[bold cyan]NexusForge AI — Agent Profiles[/bold cyan]", border_style="cyan"))

    table = Table(show_header=True, header_style="bold")
    table.add_column("Agent", style="cyan", width=20)
    table.add_column("Model", style="yellow", width=18)
    table.add_column("Role", width=35)
    table.add_column("Success", justify="center", width=8)
    table.add_column("Runs", justify="center", width=6)

    for agent_name, profile in AGENT_PROFILES.items():
        experience = profile.get("experience", {})
        success = f"{experience.get('success_rate', 0) * 100:.0f}%"
        executions = str(experience.get("total_executions", 0))
        # Role text is clipped to fit its 35-character column.
        table.add_row(agent_name, profile["model"], profile["role"][:35], success, executions)

    console.print(table)
237
+
238
+
239
@cli.command()
def models():
    """Show LLM fallback chain and local model status."""
    from .agents import LLM_FALLBACK_CHAIN

    console.print(Panel.fit("[bold cyan]LLM Fallback Chain[/bold cyan]", border_style="cyan"))

    table = Table(show_header=True, header_style="bold")
    table.add_column("Priority", justify="center", width=8)
    table.add_column("Provider", style="cyan", width=12)
    table.add_column("Model", style="yellow", width=25)
    table.add_column("Role", width=20)
    table.add_column("Cost", width=6)

    # Priority is simply the 1-based position in the chain.
    priority = 0
    for entry in LLM_FALLBACK_CHAIN:
        priority += 1
        table.add_row(str(priority), entry["provider"], entry["model"], entry["role"], entry["cost"])

    console.print(table)
257
+
258
+
259
@cli.command()
def dev():
    """Start local development environment (Ollama + backend + frontend).

    Spawns three detached shell processes (Ollama server, uvicorn backend,
    Vite frontend) and returns immediately — the children keep running.

    Fix over the original: the backend/frontend paths were built by
    appending "/backend" to an os.path.join() result and interpolated
    unquoted into the shell command, which breaks on Windows paths with
    spaces; they are now joined properly and quoted.
    """
    console.print(Panel.fit("[bold cyan]Starting NexusForge Dev Environment[/bold cyan]", border_style="cyan"))

    project_dir = os.path.join(os.path.expanduser("~"), "Desktop", "portafolio-completo", "proyectos", "07-nexusforge-ai")

    if not os.path.exists(project_dir):
        console.print("[red]Project not found. Run 'nexusforge setup' first.[/red]")
        return

    backend_dir = os.path.join(project_dir, "backend")
    frontend_dir = os.path.join(project_dir, "frontend")

    console.print(" [1] Starting Ollama...")
    # Output discarded: `ollama serve` is chatty and errors are not actionable here.
    subprocess.Popen("ollama serve", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    console.print(" [2] Starting backend (port 8000)...")
    subprocess.Popen(
        f'cd "{backend_dir}" && python -m uvicorn app.main:app --reload --port 8000',
        shell=True,
    )

    console.print(" [3] Starting frontend (port 5173)...")
    subprocess.Popen(
        f'cd "{frontend_dir}" && npm run dev',
        shell=True,
    )

    console.print("\n [green]NexusForge running![/green]")
    console.print(" Frontend: http://localhost:5173")
    console.print(" Backend: http://localhost:8000/docs")
    console.print(" Ollama: http://localhost:11434")
289
+
290
+
291
@cli.command()
def tunnel():
    """Start Cloudflare tunnel to expose local Ollama to Render.

    Blocks in the foreground until the user stops the tunnel (Ctrl+C).
    """
    console.print(Panel.fit("[bold cyan]Ollama Tunnel[/bold cyan]", border_style="cyan"))

    # Check Ollama — curl exits non-zero when nothing listens on 11434,
    # so run() returns ok=False in that case.
    ok, _ = run("curl -s http://localhost:11434/api/tags")
    if not ok:
        console.print("[red]Ollama is not running. Start it first.[/red]")
        return

    # Find cloudflared
    # NOTE(review): Windows-only assumption — hardcoded ".exe" filename,
    # Desktop location, and the windows-amd64 release URL. Confirm before
    # running this command on macOS/Linux.
    cf_path = os.path.join(os.path.expanduser("~"), "Desktop", "cloudflared.exe")
    if not os.path.exists(cf_path):
        console.print("[yellow]cloudflared not found. Downloading...[/yellow]")
        run(f'curl -L -o "{cf_path}" https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-windows-amd64.exe')

    console.print(" Starting tunnel to localhost:11434...")
    console.print(" [dim]The tunnel URL will appear below. Copy it to Render as OLLAMA_TUNNEL_URL[/dim]")
    # os.system runs cloudflared in the foreground; its stdout (including the
    # generated tunnel URL) goes straight to the terminal.
    os.system(f'"{cf_path}" tunnel --url http://localhost:11434')
311
+
312
+
313
@cli.command()
def kiro():
    """Generate/refresh Kiro specs, steering, memory, and hooks."""
    from .kiro_specs import generate_kiro_specs

    project_dir = os.path.join(os.path.expanduser("~"), "Desktop", "portafolio-completo", "proyectos", "07-nexusforge-ai")

    if not os.path.exists(project_dir):
        console.print("[red]Project not found. Run 'nexusforge setup' first.[/red]")
        return

    kiro_dir = generate_kiro_specs(project_dir)
    console.print(f"[green]Kiro specs generated at {kiro_dir}[/green]")
    # One summary line per generated artifact group.
    for summary_line in (
        " steering/product.md — Product context",
        " steering/tech.md — Tech stack",
        " steering/structure.md — Project structure",
        " steering/work-rules.md — Work rules & deployment",
        " memory/ — Agent experience, risks, decisions",
        " hooks/ — Pre-commit, test-on-change, build-check",
    ):
        console.print(summary_line)
332
+
333
+
334
@cli.command()
def rules():
    """Show work rules and critical guidelines."""
    # Imported lazily inside the command, matching the other subcommands.
    from .kiro_specs import WORK_RULES
    console.print(Panel(WORK_RULES, title="[bold]Work Rules[/bold]", border_style="yellow"))
339
+
340
+
341
if __name__ == "__main__":
    # Allow running as `python -m nexusforge_cli.main` during development;
    # the installed console script calls cli() directly.
    cli()
@@ -0,0 +1,69 @@
1
+ Metadata-Version: 2.4
2
+ Name: nexusforge-cli
3
+ Version: 1.0.0
4
+ Summary: NexusForge AI — Full stack setup CLI. Install local LLMs, agents, and orchestration in one command.
5
+ Author-email: Christian Hernandez <christianescamilla15@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/christianescamilla15-cell/nexusforge-ai
8
+ Project-URL: Demo, https://nexusforge-two.vercel.app
9
+ Keywords: ai,agents,llm,orchestration,nexusforge,ollama,automation
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Requires-Python: >=3.10
16
+ Description-Content-Type: text/markdown
17
+ Requires-Dist: httpx>=0.25.0
18
+ Requires-Dist: rich>=13.0.0
19
+ Requires-Dist: click>=8.0.0
20
+
21
+ # NexusForge CLI
22
+
23
+ Enterprise AI Agent Orchestration Platform — setup and management CLI.
24
+
25
+ ## Install
26
+
27
+ ```bash
28
+ pip install nexusforge-cli
29
+ ```
30
+
31
+ ## Commands
32
+
33
+ ```bash
34
+ nexusforge setup # Full environment setup (Python, Node, Ollama, LLMs, Kiro)
35
+ nexusforge status # Check all services status
36
+ nexusforge dev # Start local dev environment
37
+ nexusforge tunnel # Expose Ollama to Render via Cloudflare
38
+ nexusforge agents # View 24 agent profiles with experience
39
+ nexusforge models # Show LLM fallback chain
40
+ nexusforge kiro # Generate/refresh Kiro specs
41
+ nexusforge rules # Show work rules and guidelines
42
+ ```
43
+
44
+ ## What `nexusforge setup` installs
45
+
46
+ | Component | Purpose |
47
+ |-----------|---------|
48
+ | deepseek-r1:8b | Chat with visible thinking |
49
+ | qwen2.5-coder:7b | Code/JSON agents |
50
+ | llama3.1:8b | Language agents |
51
+ | gemma3:4b | Fast classification |
52
+ | nomic-embed-text | RAG embeddings |
53
+ | Kiro specs | Steering, memory, hooks |
54
+ | Backend deps | FastAPI, asyncpg, etc. |
55
+ | Frontend deps | React, Vite |
56
+
57
+ ## Architecture
58
+
59
+ ```
60
+ Local: Ollama (5 LLMs) + FastAPI + React
61
+ Cloud: Vercel + Render + PostgreSQL + Redis
62
+ LLM: deepseek-r1 → Groq (free) → Claude (paid)
63
+ ```
64
+
65
+ ## Links
66
+
67
+ - Demo: https://nexusforge-two.vercel.app
68
+ - API: https://nexusforge-api.onrender.com
69
+ - Repo: https://github.com/christianescamilla15-cell/nexusforge-ai
@@ -0,0 +1,9 @@
1
+ nexusforge_cli/__init__.py,sha256=XURh-7V4pCpqAT_wgDYxSTeyjWMAZ-CAwsSKb00vEC0,88
2
+ nexusforge_cli/agents.py,sha256=rtKRm_BEc9g_LILeUhnKpL5fTNklbahck5rm7ymes_I,9371
3
+ nexusforge_cli/kiro_specs.py,sha256=7XYSvJ2PDqRo4m-wZf8XtrdLJRbSyLUPUsFw1d0y2cY,9477
4
+ nexusforge_cli/main.py,sha256=U5hY3Z3d0AJfh8bosS6ZGuq5AlpwqsIw5V5POsrMhw0,13438
5
+ nexusforge_cli-1.0.0.dist-info/METADATA,sha256=rqaGpb35qCJPDVewjN7WOEcrFogWa5HEV8zHX3JAtzY,2320
6
+ nexusforge_cli-1.0.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
7
+ nexusforge_cli-1.0.0.dist-info/entry_points.txt,sha256=2jLrJ5K2LdWeja-1Um6js8Qpadmf8ILxNNWR0NOLqW8,55
8
+ nexusforge_cli-1.0.0.dist-info/top_level.txt,sha256=n1z0-BSxZKF2EpO3gyEda5YlMWgykIut4u92SFA9Dpo,15
9
+ nexusforge_cli-1.0.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ nexusforge = nexusforge_cli.main:cli
@@ -0,0 +1 @@
1
+ nexusforge_cli