archo 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
archo-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,11 @@
1
+ Metadata-Version: 2.4
2
+ Name: archo
3
+ Version: 0.1.0
4
+ Requires-Dist: anthropic>=0.84.0
5
+ Requires-Dist: python-dotenv>=1.2.2
6
+ Requires-Dist: pydantic>=2.0
7
+ Requires-Dist: typer>=0.24.0
8
+ Requires-Dist: platformdirs>=4.9.4
9
+ Requires-Dist: tomli_w>=1.2.0
10
+ Provides-Extra: openai
11
+ Requires-Dist: openai>=1.0; extra == "openai"
archo-0.1.0/README.md ADDED
@@ -0,0 +1,88 @@
1
+ # archo
2
+
3
+ Auto-generate architecture docs from your codebase. Always in sync.
4
+
5
+ `archo` scans your project, sends the source files to an LLM, and generates an `ARCHO.md` file with a Mermaid architecture diagram and a structured breakdown of routes, dependencies, databases, and pub/sub topics.
6
+
7
+ ```
8
+ $ archo
9
+ Scanning repo: .
10
+
11
+ Collected (12 files):
12
+ + src/app.py
13
+ + src/routes/users.py
14
+ + src/services/notification.py
15
+ ...
16
+
17
+ The following files will be sent to anthropic: [...]
18
+ Would you like to continue? [y/n]: y
19
+
20
+ Generating ARCHO.md...
21
+ Done — ARCHO.md written.
22
+ ```
23
+
24
+ ## Install
25
+
26
+ ```bash
27
+ pip install archo
28
+ ```
29
+
30
+ For OpenAI support:
31
+
32
+ ```bash
33
+ pip install "archo[openai]"
34
+ ```
35
+
36
+ ## Quick start
37
+
38
+ Run `archo` in any project directory:
39
+
40
+ ```bash
41
+ cd your-project
42
+ archo
43
+ ```
44
+
45
+ On first run, you'll be prompted to configure your LLM provider, model, and API key. Config is stored in your system config directory (e.g. `~/.config/archo/config.toml`).
46
+
47
+ ## Supported providers
48
+
49
+ | Provider | Models | Notes |
50
+ |----------|-------------------------|-------|
51
+ | Anthropic | claude-sonnet-4-6, etc. | Default provider |
52
+ | OpenAI | gpt-5.4, etc. | Requires `pip install archo[openai]` |
53
+ | Ollama | Any local model | Requires a running Ollama instance |
54
+
55
+ ## Commands
56
+
57
+ ```bash
58
+ archo # Generate architecture docs for current directory
59
+ archo init # First-time setup (provider, model, API key)
60
+ archo provider # View or change LLM provider
61
+ archo model # View or change model
62
+ archo api-key # View or change API key
63
+ archo llm-host # View or change Ollama host URL
64
+ ```
65
+
66
+ ## What it generates
67
+
68
+ `archo` creates an `ARCHO.md` file containing:
69
+
70
+ - **Mermaid diagram** — visual map of your service, its routes, outbound calls, databases, and pub/sub topics
71
+ - **Routes** — all HTTP endpoints with methods and paths
72
+ - **Outbound calls** — services your code talks to and how (REST, gRPC, SDK, etc.)
73
+ - **Pub/Sub** — topics your service publishes to and subscribes from
74
+ - **Databases** — database types and their collections/tables
75
+
76
+ ## Security
77
+
78
+ `archo` never sends sensitive files to any LLM provider. The following are always excluded:
79
+
80
+ - `.env`, `.env.production`, `.env.prod`
81
+ - `*.pem`, `*.key`, `*.p12`
82
+ - `secrets.yaml`, `credentials.json`
83
+
84
+ Large files (>50KB), lock files, logs, and common non-source directories (`node_modules`, `.venv`, `__pycache__`, etc.) are also skipped.
85
+
86
+ ## License
87
+
88
+ MIT
@@ -0,0 +1,24 @@
1
+ [build-system]
2
+ requires = ["setuptools"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "archo"
7
+ version = "0.1.0"
8
+ dependencies = [
9
+ "anthropic>=0.84.0",
10
+ "python-dotenv>=1.2.2",
11
+ "pydantic>=2.0",
12
+ "typer>=0.24.0",
13
+ "platformdirs>=4.9.4",
14
+ "tomli_w>=1.2.0"
15
+ ]
16
+
17
+ [project.optional-dependencies]
18
+ openai = ["openai>=1.0"]
19
+
20
+ [tool.setuptools.packages.find]
21
+ where = ["src"]
22
+
23
+ [project.scripts]
24
+ archo = "archo.main:app"
archo-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
File without changes
File without changes
@@ -0,0 +1,91 @@
1
+ import os
2
+
3
# Files that must never be sent to an LLM provider, matched by exact basename.
BLOCKED_FILES = {
    ".env",
    ".env.production",
    ".env.prod",
    "secrets.yaml",
    "credentials.json",
    ".gitignore",
}

# Extensions that must never be sent (keys, certificates).
BLOCKED_EXTENSIONS = {".pem", ".key", ".p12"}

# Directories os.walk never descends into.
SKIP_DIRS = {
    "tests", "test", "__pycache__", "node_modules",
    "dist", "build", ".venv", "venv", "migrations", ".git",
}

# Non-source suffixes to skip. NOTE: matched with str.endswith (not
# os.path.splitext) so multi-dot suffixes like ".d.ts" actually match.
SKIP_EXTENSIONS = {".lock", ".log", ".md", ".txt", ".d.ts"}

MAX_FILE_SIZE = 50 * 1024  # 50kb


def is_blocked(file_path: str) -> bool:
    """Return True if this file must never be sent."""
    name = os.path.basename(file_path)
    _, ext = os.path.splitext(name)
    return name in BLOCKED_FILES or ext in BLOCKED_EXTENSIONS


def collect_files(repo_path: str = ".") -> tuple[str, list[str]]:
    """Walk repo_path, collect relevant files, and return a single bundled string.

    Args:
        repo_path: root directory to scan (defaults to the current directory).

    Returns:
        (bundle, paths): bundle joins every collected file's content under a
        "### <rel_path>" header; paths lists the collected relative paths.
        Both are empty when nothing was collected.
    """
    print(f"Scanning repo: {repo_path}\n")
    bundle = []
    collected_paths = []
    skipped = []

    # str.endswith accepts a tuple; build it once outside the walk.
    skip_suffixes = tuple(SKIP_EXTENSIONS)

    for root, dirs, files in os.walk(repo_path):
        # Prune skipped and hidden directories in place so os.walk never
        # descends into them.
        dirs[:] = [
            d for d in dirs
            if d not in SKIP_DIRS and not d.startswith(".")
        ]

        for filename in sorted(files):
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, repo_path)

            if is_blocked(full_path):
                skipped.append((rel_path, "blocked"))
                continue

            # Suffix match (not splitext) so ".d.ts" and friends are caught:
            # splitext("types.d.ts") yields ".ts" and would miss it.
            if filename.endswith(skip_suffixes):
                skipped.append((rel_path, "skipped extension"))
                continue

            try:
                size = os.path.getsize(full_path)
            except OSError as e:
                print(f"  Error checking size of {rel_path}: {e}")
                continue

            if size > MAX_FILE_SIZE:
                skipped.append((rel_path, f"too large ({size // 1024}kb)"))
                continue

            try:
                with open(full_path, "r", encoding="utf-8") as f:
                    content = f.read()
            except (OSError, UnicodeDecodeError) as e:
                print(f"  Error reading {rel_path}: {e}")
                continue

            bundle.append(f"### {rel_path}\n{content}")
            collected_paths.append(rel_path)

    # Summary
    print(f"Collected ({len(bundle)} files):")
    for path in collected_paths:
        print(f"  + {path}")

    print(f"\nSkipped ({len(skipped)} files):")
    for path, reason in skipped:
        print(f"  - {path} [{reason}]")

    if not bundle:
        print("\nNo files collected.")
        return "", []

    print("\nDone.")
    return "\n\n".join(bundle), collected_paths
@@ -0,0 +1,88 @@
1
+ import os
2
+ import tomllib
3
+ import tomli_w
4
+ from platformdirs import user_config_dir
5
+
6
# User-level config location, e.g. ~/.config/archo/config.toml on Linux.
CONFIG_DIR = user_config_dir("archo")
CONFIG_PATH = os.path.join(CONFIG_DIR, "config.toml")


def config_exists() -> bool:
    """Return True if the user-level config file already exists."""
    return os.path.exists(CONFIG_PATH)


def setup_config(provider: str, model: str, api_key: str | None = None, host_url: str | None = None) -> None:
    """Create the config file on first run; no-op if it already exists.

    Values are serialized with tomli_w so quoting/escaping is always valid
    TOML. A None api_key (the ollama flow passes None) is omitted instead of
    being written as the literal string "None", which would later be returned
    by get_api_key() and sent as a real key.
    """
    if config_exists():
        return

    os.makedirs(CONFIG_DIR, exist_ok=True)

    section = {"LLM_PROVIDER": provider, "LLM_MODEL": model}
    if api_key is not None:
        section["LLM_API_KEY"] = api_key
    if host_url:
        section["OLLAMA_HOST"] = host_url

    # "x" mode keeps first-run semantics: fail rather than clobber a config
    # created by a concurrent run.
    with open(CONFIG_PATH, "xb") as f:
        tomli_w.dump({"llm-setup": section}, f)


def get_config() -> dict:
    """Load and return the parsed config file.

    Raises:
        FileNotFoundError: if the config file does not exist.
        ValueError: if the file is not valid TOML.
    """
    if not config_exists():
        raise FileNotFoundError("Config file not found")

    with open(CONFIG_PATH, "rb") as config_file:
        try:
            return tomllib.load(config_file)
        except tomllib.TOMLDecodeError as e:
            raise ValueError("Config file is corrupted") from e
38
+
39
def update_config(config: dict) -> None:
    """Rewrite the entire config file from *config*, serialized as TOML."""
    with open(CONFIG_PATH, "wb") as config_file:
        tomli_w.dump(config, config_file)
43
+
44
def get_model() -> str:
    """Return the configured model name, or a placeholder when unset."""
    section = get_config().get("llm-setup", {})
    return section.get("LLM_MODEL", "No model set")
48
+
49
def get_provider() -> str:
    """Return the configured provider name, or a placeholder when unset."""
    section = get_config().get("llm-setup", {})
    return section.get("LLM_PROVIDER", "No provider set")
53
+
54
def update_model(model: str) -> None:
    """Persist *model* as the configured LLM model."""
    cfg = get_config()
    cfg["llm-setup"]["LLM_MODEL"] = model
    update_config(cfg)
59
+
60
def update_provider(provider: str) -> None:
    """Persist *provider* as the configured LLM provider."""
    cfg = get_config()
    cfg["llm-setup"]["LLM_PROVIDER"] = provider
    update_config(cfg)
65
+
66
def get_api_key() -> str:
    """Return the configured API key, or a placeholder when unset."""
    section = get_config().get("llm-setup", {})
    return section.get("LLM_API_KEY", "No API key set")
70
+
71
def update_api_key(api_key: str) -> None:
    """Persist *api_key* as the configured API key."""
    cfg = get_config()
    cfg["llm-setup"]["LLM_API_KEY"] = api_key
    update_config(cfg)
76
+
77
def get_llm_host() -> str | None:
    """Return the configured Ollama host URL, or None when not set."""
    section = get_config().get("llm-setup", {})
    return section.get("OLLAMA_HOST", None)
81
+
82
def update_llm_host(host_url: str) -> None:
    """Persist *host_url* as the configured Ollama host URL."""
    cfg = get_config()
    cfg["llm-setup"]["OLLAMA_HOST"] = host_url
    update_config(cfg)
87
+
88
+
@@ -0,0 +1 @@
1
+
@@ -0,0 +1,33 @@
1
+ import os
2
+
3
+ from archo.llm.providers.llm_base import LLMProvider
4
+ from archo.schemas.architecture import ArchitectureResponse
5
+
6
+
7
def build_prompt(file_bundle: str) -> str:
    """Wrap the collected file bundle in the analysis instruction."""
    instruction = "Analyze these service files and return the architecture JSON:"
    return f"{instruction}\n\n{file_bundle}"
10
+
11
def get_provider(provider: str, model, api_key_value: str | None = None, llm_host: str | None = None) -> LLMProvider:
    """Return an instance of the configured provider.

    Provider modules are imported lazily inside each branch so optional SDKs
    (e.g. the openai package) are only required when actually selected.

    Raises:
        ValueError: if *provider* is not one of anthropic/openai/ollama.
    """
    if provider == "anthropic":
        from archo.llm.providers.anthropic import AnthropicProvider
        return AnthropicProvider(api_key_value, model)
    elif provider == "openai":
        from archo.llm.providers.openai import OpenAIProvider
        return OpenAIProvider(api_key_value, model)
    elif provider == "ollama":
        from archo.llm.providers.ollama import OllamaProvider
        # NOTE: Ollama takes (model, host) — no API key needed for a local server.
        return OllamaProvider(model, llm_host)
    else:
        raise ValueError(f"Unknown LLM_PROVIDER '{provider}'. Supported: anthropic, openai, ollama")
24
+
25
+
26
def call_llm(file_bundle: str, provider: str, model: str, api_key_value: str = None, llm_host: str = None) -> ArchitectureResponse:
    """Send the file bundle to the configured LLM provider and return parsed architecture JSON."""
    # Use a distinct name for the instance so the `provider` string parameter
    # is not shadowed by an object of a different type.
    llm = get_provider(provider, model, api_key_value, llm_host)
    return llm.call(build_prompt(file_bundle))
@@ -0,0 +1,21 @@
1
# System prompt sent with every request. Instructs the model to emit ONLY
# JSON matching the ArchitectureResponse schema (archo.schemas.architecture);
# the classification rules below disambiguate the common edge cases.
SYSTEM_PROMPT = """You are a code analysis assistant. Analyze the provided service files and extract architecture information.

Return ONLY valid JSON matching this exact schema — no markdown, no explanation, just JSON:

{
  "service_name": string,
  "language": string,
  "framework": string,
  "routes": [{ "method": string, "path": string }],
  "outbound_calls": [{ "target_service": string, "via": string }],
  "pubsub": { "subscribes": [string], "publishes": [string] },
  "databases": [{ "type": string, "collections_or_tables": [string] }]
}

Rules for classification:
- "framework" refers to the primary web or application framework (e.g. Flask, FastAPI, Express, Spring). CLI libraries (e.g. Typer, Click, argparse), build tools, and utility packages are not frameworks — use "none" if no application framework is detected.
- "outbound_calls" includes ALL calls to services or APIs outside this codebase — internal services (e.g. orders-service), third-party APIs (e.g. Stripe, Twilio), and SDK-based integrations (e.g. boto3 for S3). Use "via" to describe how the call is made (HTTP, SDK, gRPC, etc.).
- "databases" refers only to actual database systems (e.g. PostgreSQL, MongoDB, Redis, DynamoDB). File system reads/writes and local config files are not databases — use an empty array if none are present.
- "pubsub" refers strictly to message broker patterns (e.g. Redis Pub/Sub channels, Kafka topics, RabbitMQ exchanges).
Do NOT classify Redis job queues (e.g. enqueue_job, arq, rq, celery) as pubsub — those belong in "databases" with type "redis".
"""
File without changes
@@ -0,0 +1,41 @@
1
+ import anthropic as anthropic_sdk
2
+ from archo.llm.prompt import SYSTEM_PROMPT
3
+ from archo.llm.providers.llm_base import LLMProvider
4
+ from archo.schemas.architecture import ArchitectureResponse
5
+
6
+
7
class AnthropicProvider(LLMProvider):
    """LLM provider backed by the Anthropic Messages API."""

    def __init__(self, api_key: str, model: str):
        # Client is constructed eagerly; an invalid key only surfaces on call().
        self.api_key = api_key
        self.client = anthropic_sdk.Anthropic(api_key=self.api_key)
        self.model = model

    def call(self, file_bundle: str) -> ArchitectureResponse:
        """Send file bundle to Anthropic and return the parsed ArchitectureResponse.

        Raises:
            RuntimeError: on auth/connection/API errors, unparseable output,
                or an empty response.
        """
        print(f"Sending to Anthropic ({self.model})...")

        try:
            # Streaming is used but only the final accumulated message is read.
            with self.client.messages.stream(
                model=self.model,
                max_tokens=4096,
                system=SYSTEM_PROMPT,
                messages=[{"role": "user", "content": file_bundle}],
                # NOTE(review): output_format with a pydantic model assumes a
                # recent anthropic SDK with structured-output support — confirm
                # against the pinned SDK version (>=0.84.0).
                output_format=ArchitectureResponse,
            ) as stream:
                response = stream.get_final_message()
        except anthropic_sdk.AuthenticationError:
            raise RuntimeError("Invalid API key — check LLM_API_KEY.")
        except anthropic_sdk.APIConnectionError:
            raise RuntimeError("Could not connect to Anthropic API — check your network.")
        except anthropic_sdk.APIStatusError as e:
            raise RuntimeError(f"Anthropic API error ({e.status_code}): {e.message}")

        # Validate the first text block against the schema; a parse failure is
        # surfaced rather than trying later blocks.
        for block in response.content:
            if block.type == "text":
                try:
                    architecture_response = ArchitectureResponse.model_validate_json(block.text)
                    return architecture_response
                except Exception as e:
                    raise RuntimeError(f"Failed to parse Anthropic response: {e}")

        raise RuntimeError("Anthropic returned an empty response.")
@@ -0,0 +1,5 @@
1
+ from archo.schemas.architecture import ArchitectureResponse
2
+
3
class LLMProvider:
    """Minimal interface all LLM providers implement."""

    def call(self, file_bundle: str) -> ArchitectureResponse:
        """Send *file_bundle* to the backing LLM and return the parsed architecture."""
        raise NotImplementedError
@@ -0,0 +1,48 @@
1
+ import json
2
+ import urllib.request
3
+ import urllib.error
4
+ from archo.llm.prompt import SYSTEM_PROMPT
5
+ from archo.llm.providers.llm_base import LLMProvider
6
+ from archo.schemas.architecture import ArchitectureResponse
7
+
8
+
9
class OllamaProvider(LLMProvider):
    """Provider that talks to a (typically local) Ollama server over its HTTP chat API."""

    # Local models can be slow on large bundles, but still bound the wait so a
    # dead server doesn't hang the CLI forever (urlopen has no default timeout).
    REQUEST_TIMEOUT = 300

    def __init__(self, model: str, ollama_host: str):
        # Normalize so "http://host:11434/" and "http://host:11434" both
        # produce ".../api/chat" rather than ".../​/api/chat".
        self.host = ollama_host.rstrip("/") if ollama_host else ollama_host
        self.model = model

    def call(self, file_bundle: str) -> ArchitectureResponse:
        """Send file bundle to Ollama and return the parsed ArchitectureResponse.

        Raises:
            RuntimeError: on connection failure, invalid/empty responses, or
                output that does not validate against the schema.
        """
        url = f"{self.host}/api/chat"

        payload = json.dumps({
            "model": self.model,
            "stream": False,
            "format": "json",  # ask Ollama to constrain output to valid JSON
            "messages": [
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": file_bundle},
            ],
        }).encode("utf-8")

        print(f"Sending to Ollama ({self.model} @ {self.host})...")

        req = urllib.request.Request(url, data=payload, headers={"Content-Type": "application/json"})

        try:
            with urllib.request.urlopen(req, timeout=self.REQUEST_TIMEOUT) as resp:
                body = json.loads(resp.read().decode("utf-8"))
        except urllib.error.URLError as e:
            raise RuntimeError(f"Could not connect to Ollama at {self.host} — is it running? ({e.reason})")
        except json.JSONDecodeError:
            raise RuntimeError("Ollama returned an invalid response.")

        text = body.get("message", {}).get("content", "")
        if not text:
            raise RuntimeError("Ollama returned an empty response.")

        try:
            return ArchitectureResponse.model_validate_json(text)
        except Exception as e:
            raise RuntimeError(f"Failed to parse Ollama response: {e}")
@@ -0,0 +1,42 @@
1
+ from archo.llm.prompt import SYSTEM_PROMPT
2
+ from archo.llm.providers.llm_base import LLMProvider
3
+ from archo.schemas.architecture import ArchitectureResponse
4
+
5
+
6
class OpenAIProvider(LLMProvider):
    """LLM provider backed by the OpenAI chat completions API.

    The openai package is an optional dependency (extra "openai"); importing
    it lazily here keeps the rest of archo usable without it installed.
    """

    def __init__(self, api_key: str, model: str):
        try:
            import openai
        except ImportError:
            raise ImportError("OpenAI provider requires the openai package. Install with: pip install archo[openai]")

        self.api_key = api_key
        # Keep a handle on the module so call() can reference its exception types.
        self._openai_sdk = openai
        self.client = openai.OpenAI(api_key=self.api_key)
        self.model = model

    def call(self, file_bundle: str) -> ArchitectureResponse:
        """Send the file bundle to OpenAI and return the parsed ArchitectureResponse.

        Raises:
            RuntimeError: on auth/connection/API errors or an empty response.
        """
        print(f"Sending to OpenAI ({self.model})...")

        try:
            # parse() is the SDK's structured-output helper: the reply is
            # validated against the pydantic model given as response_format
            # and exposed on message.parsed.
            response = self.client.chat.completions.parse(
                model=self.model,
                max_completion_tokens=4096,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": file_bundle},
                ],
                response_format=ArchitectureResponse,
            )
        except self._openai_sdk.AuthenticationError:
            raise RuntimeError("Invalid API key — check LLM_API_KEY.")
        except self._openai_sdk.APIConnectionError:
            raise RuntimeError("Could not connect to OpenAI API — check your network.")
        except self._openai_sdk.APIStatusError as e:
            raise RuntimeError(f"OpenAI API error ({e.status_code}): {str(e)}")

        if not response.choices:
            raise RuntimeError("OpenAI returned an empty response.")

        return response.choices[0].message.parsed
@@ -0,0 +1,152 @@
1
+ import typer
2
+ from importlib.metadata import version
3
+
4
+ from archo.config import (
5
+ config_exists, setup_config, get_config,
6
+ get_provider, update_provider,
7
+ get_model, update_model,
8
+ get_api_key, update_api_key,
9
+ get_llm_host, update_llm_host,
10
+ )
11
+ from archo.llm.llm_client import call_llm
12
+ from archo.collector.file_collector import collect_files
13
+ from archo.output.output_generator import write_output
14
+
15
+ app = typer.Typer()
16
+
17
def version_callback(value: bool):
    """Eager --version handler: print the installed version and exit."""
    if not value:
        return
    typer.echo(f"archo {version('archo')}")
    raise typer.Exit()
21
+
22
@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    version: bool = typer.Option(None, "--version", "-v", callback=version_callback, is_eager=True, help="Show version and exit."),
):
    """
    Sets up archo for the first time if .archo/config doesn't exist
    Reads in all the files in the current working directory ignoring sensitive files
    Sends those files to be interpreted by the LLM of your choice and return a structured response
    An ARCHO.md file is created with a high level overview of the application's architecture
    """
    # A subcommand (or --version) was invoked; skip the default flow.
    if ctx.invoked_subcommand or version:
        return

    if not config_exists():
        init()

    file_string, file_list = collect_files()

    if not file_string:
        return

    provider_name = get_provider()

    user_response = typer.prompt(f"The following files will be sent to {provider_name}: {file_list}\nWould you like to continue? [y/n]")

    # Require an explicit yes. Previously anything other than n/no continued,
    # so a typo could silently ship source code to the provider.
    if user_response.strip().lower() not in ("y", "yes"):
        typer.echo("Aborting.")
        raise typer.Exit()

    model_name = get_model()
    llm_host_value = get_llm_host()
    api_key_value = get_api_key()
    result = call_llm(file_string, provider_name, model_name, api_key_value, llm_host_value)
    write_output(result)
55
+
56
@app.command()
def init():
    """
    Sets up the archo config file for the first time.
    Prompts user for provider, model and API Key.
    Prompts user for the Ollama host URL if provider is ollama.
    """
    # (Docstring no longer claims a .gitignore update — this command does not
    # touch .gitignore.)

    if config_exists():
        typer.echo("Config file already exists. Run archo --help for available commands.")
        raise typer.Exit(1)

    typer.echo("Welcome to Archo! Let's get you set up.")

    llm_api_key = None
    llm_host_url = None

    llm_provider = typer.prompt("What LLM provider would you like to use? (anthropic, openai, ollama)", default="anthropic", show_default=True)
    # Normalize so "Anthropic " and "anthropic" behave the same downstream
    # (provider dispatch in llm_client compares exact lowercase names).
    llm_provider = llm_provider.strip().lower()
    llm_model = typer.prompt("What model would you like to use? e.g. claude-sonnet-4-6, gpt-5.3-instant, or your own for ollama")

    if llm_provider == "ollama":
        llm_host_url = typer.prompt("Enter your LLM host URL")
    else:
        llm_api_key = typer.prompt("Enter your API Key. It must correspond to the provider you selected.", hide_input=True)

    setup_config(llm_provider, llm_model, llm_api_key, llm_host_url)
83
+
84
@app.command()
def model():
    """
    Displays current model being used
    Allows user to change their model
    """
    typer.echo(f"Current model: {get_model()}")
    new_model = typer.prompt("Enter new model to switch or press enter to keep current", default="", show_default=False)
    if not new_model:
        typer.echo("Model unchanged")
        return
    update_model(new_model)
    typer.echo(f"Model updated to {new_model}")
97
+
98
@app.command()
def provider():
    """
    Displays current provider being used
    Allows user to change their provider
    Allows user to change their model
    """
    typer.echo(f"Current provider: {get_provider()}")
    new_provider = typer.prompt("Enter new provider to switch or press enter to keep current", default="", show_default=False)
    if not new_provider:
        typer.echo("Provider unchanged")
        return

    update_provider(new_provider)
    typer.echo(f"Provider updated to {new_provider}")

    if new_provider.lower() == "ollama":
        typer.echo("Since you switched to Ollama, you'll need to set your LLM host URL.")
        llm_host()

    wants_model = typer.prompt("Would you like to update your model as well? (y/n)", default="y",
                               show_default=True)
    if wants_model.lower() in ("y", "yes"):
        model()
121
+
122
@app.command()
def api_key():
    """
    Displays current API key being used
    Allows user to change their API key
    """
    typer.echo(f"Current API key: {get_api_key()}")
    new_key = typer.prompt("Enter new API key to switch or press enter to keep current", default="", show_default=False, hide_input=True)
    if not new_key:
        typer.echo("API key unchanged")
        return
    update_api_key(new_key)
    typer.echo("API key updated")
135
+
136
@app.command()
def llm_host():
    """
    Displays current Ollama host URL
    Allows user to change their Ollama host URL
    """
    current = get_llm_host()
    typer.echo("No Ollama host set" if current is None else f"Current Ollama host: {current}")
    new_host = typer.prompt("Enter new Ollama host URL or press enter to keep current", default="", show_default=False)
    if not new_host:
        typer.echo("Ollama host unchanged")
        return
    update_llm_host(new_host)
    typer.echo(f"Ollama host updated to {new_host}")
@@ -0,0 +1,57 @@
1
+ from archo.schemas.architecture import ArchitectureResponse
2
+
3
+
4
+ def generate_markdown(data: ArchitectureResponse) -> str:
5
+ """Convert architecture JSON into a readable ARCHO.md string."""
6
+ lines = []
7
+
8
+ lines.append(f"# {data.service_name}")
9
+ lines.append("")
10
+ lines.append(f"**Language:** {data.language} ")
11
+ if data.framework != "none":
12
+ lines.append(f"**Framework:** {data.framework} ")
13
+ lines.append("")
14
+
15
+ if data.routes:
16
+ lines.append("## Routes")
17
+ lines.append("")
18
+ for r in data.routes:
19
+ lines.append(f"- `{r.method}` {r.path}")
20
+ lines.append("")
21
+
22
+ outbound = data.outbound_calls or []
23
+ if outbound:
24
+ lines.append("## Outbound Calls")
25
+ lines.append("")
26
+ for call in outbound:
27
+ lines.append(f"- **{call.target_service}** via {call.via}")
28
+ lines.append("")
29
+
30
+ if data.pubsub:
31
+ subscribes = data.pubsub.subscribes
32
+ publishes = data.pubsub.publishes
33
+ if subscribes or publishes:
34
+ lines.append("## Pub/Sub")
35
+ lines.append("")
36
+ if subscribes:
37
+ lines.append("**Subscribes:**")
38
+ for topic in subscribes:
39
+ lines.append(f"- {topic}")
40
+ if publishes:
41
+ lines.append("**Publishes:**")
42
+ for topic in publishes:
43
+ lines.append(f"- {topic}")
44
+ lines.append("")
45
+
46
+ databases = data.databases or []
47
+ if databases:
48
+ lines.append("## Databases")
49
+ lines.append("")
50
+ for db in databases:
51
+ if db.collections_or_tables:
52
+ lines.append(f"**{db.type}:** {', '.join(db.collections_or_tables)}")
53
+ else:
54
+ lines.append(f"**{db.type}**")
55
+ lines.append("")
56
+
57
+ return "\n".join(lines)
@@ -0,0 +1,49 @@
1
+ from archo.schemas.architecture import ArchitectureResponse
2
+
3
+
4
+ def generate_mermaid(data: ArchitectureResponse) -> str:
5
+ """Convert architecture JSON into a high-level Mermaid diagram string."""
6
+ name = data.service_name
7
+ language = data.language
8
+ framework = data.framework
9
+ databases = data.databases or []
10
+ outbound = data.outbound_calls or []
11
+
12
+ lines = ["graph LR"]
13
+ framework_and_language = f"{framework} / {language}" if framework != "none" else language
14
+ lines.append(f' {name}["{name}<br>({framework_and_language})"]')
15
+ lines.append("")
16
+
17
+ for db in databases:
18
+ node_id = db.type.replace(" ", "_")
19
+ label = f"{db.type}<br>({', '.join(db.collections_or_tables)})" if db.collections_or_tables else db.type
20
+ lines.append(f' {node_id}[("{label}")]')
21
+
22
+ for call in outbound:
23
+ node_id = call.target_service.replace(" ", "_").replace("-", "_")
24
+ lines.append(f' {node_id}["{call.target_service}"]')
25
+
26
+ lines.append("")
27
+ # lines.append(f" client --> {name}")
28
+
29
+ for db in databases:
30
+ node_id = db.type.replace(" ", "_")
31
+ lines.append(f" {name} --> {node_id}")
32
+
33
+ for call in outbound:
34
+ node_id = call.target_service.replace(" ", "_").replace("-", "_")
35
+ lines.append(f" {name} --> {node_id}")
36
+
37
+ return "\n".join(lines) + "\n"
38
+
39
+
40
+ def write_mermaid(data: ArchitectureResponse, output_path: str = "ARCHITECTURE.mermaid") -> None:
41
+ """Write the Mermaid diagram to a file."""
42
+ print(f"Generating {output_path}...")
43
+ content = generate_mermaid(data)
44
+ try:
45
+ with open(output_path, "w", encoding="utf-8") as f:
46
+ f.write(content)
47
+ print(f"Done — {output_path} written.")
48
+ except Exception as e:
49
+ raise RuntimeError(f"Failed to write {output_path}: {e}")
@@ -0,0 +1,17 @@
1
+ from archo.output.mermaid_generator import generate_mermaid
2
+ from archo.output.markdown_generator import generate_markdown
3
+ from archo.schemas.architecture import ArchitectureResponse
4
+
5
+
6
def write_output(data: ArchitectureResponse, output_path: str = "ARCHO.md") -> None:
    """Write the combined Mermaid diagram and markdown detail to a single file."""
    print(f"Generating {output_path}...")
    # Mermaid diagram first (fenced so it renders), then the markdown detail.
    content = f"```mermaid\n{generate_mermaid(data)}```\n\n{generate_markdown(data)}"
    try:
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(content)
        print(f"Done — {output_path} written.")
    except Exception as e:
        raise RuntimeError(f"Failed to write {output_path}: {e}")
@@ -0,0 +1,26 @@
1
+ from pydantic import BaseModel
2
+
3
class Route(BaseModel):
    # One HTTP endpoint exposed by the analyzed service.
    method: str
    path: str

class OutboundCall(BaseModel):
    # A call this service makes to another service or third-party API.
    target_service: str
    via: str  # transport, e.g. "HTTP", "SDK", "gRPC" (see SYSTEM_PROMPT)

class Pubsub(BaseModel):
    # Message-broker topics/channels this service consumes and produces.
    subscribes: list[str]
    publishes: list[str]

class Database(BaseModel):
    # A database system plus the collections/tables the service touches.
    type: str
    collections_or_tables: list[str]

class ArchitectureResponse(BaseModel):
    # Top-level schema the LLM must return; optional sections default to None
    # so providers can omit them entirely.
    service_name: str
    language: str
    framework: str
    routes: list[Route]
    outbound_calls: list[OutboundCall] | None = None
    pubsub: Pubsub | None = None
    databases: list[Database] | None = None
@@ -0,0 +1,11 @@
1
+ Metadata-Version: 2.4
2
+ Name: archo
3
+ Version: 0.1.0
4
+ Requires-Dist: anthropic>=0.84.0
5
+ Requires-Dist: python-dotenv>=1.2.2
6
+ Requires-Dist: pydantic>=2.0
7
+ Requires-Dist: typer>=0.24.0
8
+ Requires-Dist: platformdirs>=4.9.4
9
+ Requires-Dist: tomli_w>=1.2.0
10
+ Provides-Extra: openai
11
+ Requires-Dist: openai>=1.0; extra == "openai"
@@ -0,0 +1,25 @@
1
+ README.md
2
+ pyproject.toml
3
+ src/archo/__init__.py
4
+ src/archo/config.py
5
+ src/archo/main.py
6
+ src/archo.egg-info/PKG-INFO
7
+ src/archo.egg-info/SOURCES.txt
8
+ src/archo.egg-info/dependency_links.txt
9
+ src/archo.egg-info/entry_points.txt
10
+ src/archo.egg-info/requires.txt
11
+ src/archo.egg-info/top_level.txt
12
+ src/archo/collector/__init__.py
13
+ src/archo/collector/file_collector.py
14
+ src/archo/llm/__init__.py
15
+ src/archo/llm/llm_client.py
16
+ src/archo/llm/prompt.py
17
+ src/archo/llm/providers/__init__.py
18
+ src/archo/llm/providers/anthropic.py
19
+ src/archo/llm/providers/llm_base.py
20
+ src/archo/llm/providers/ollama.py
21
+ src/archo/llm/providers/openai.py
22
+ src/archo/output/markdown_generator.py
23
+ src/archo/output/mermaid_generator.py
24
+ src/archo/output/output_generator.py
25
+ src/archo/schemas/architecture.py
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ archo = archo.main:app
@@ -0,0 +1,9 @@
1
+ anthropic>=0.84.0
2
+ python-dotenv>=1.2.2
3
+ pydantic>=2.0
4
+ typer>=0.24.0
5
+ platformdirs>=4.9.4
6
+ tomli_w>=1.2.0
7
+
8
+ [openai]
9
+ openai>=1.0
@@ -0,0 +1 @@
1
+ archo