ursaproxy-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ursaproxy/__init__.py ADDED
@@ -0,0 +1,203 @@
+ from datetime import datetime, timezone
+ from email.utils import parsedate_to_datetime
+
+ import httpx
+ from jinja2 import Environment, PackageLoader
+ from xitzin import NotFound, Request, Response, TemporaryFailure, Xitzin
+
+ from .cache import cache
+ from .config import settings
+ from .converter import extract_metadata, extract_slug, html_to_gemtext
+ from .fetcher import NotFoundError, ServerError, fetch_feed, fetch_html
+
+ app = Xitzin()
+
+ # Template environments
+ templates = Environment(
+     loader=PackageLoader("ursaproxy", "templates"),
+     autoescape=False,  # Gemtext doesn't need HTML escaping
+ )
+
+ xml_templates = Environment(
+     loader=PackageLoader("ursaproxy", "templates"),
+     autoescape=True,  # XML escaping for feed
+ )
+
+
+ @app.on_startup
+ async def startup() -> None:
+     """Initialize shared HTTP client."""
+     app.state.client = httpx.AsyncClient(timeout=30.0)
+
+
+ @app.on_shutdown
+ async def shutdown() -> None:
+     """Close HTTP client."""
+     await app.state.client.aclose()
+
+
+ async def _get_feed(client: httpx.AsyncClient):
+     """Fetch feed with caching and error handling."""
+     if cached := cache.get("feed", settings.cache_ttl_feed):
+         return cached
+
+     try:
+         feed = await fetch_feed(client)
+         cache.set("feed", feed)
+         return feed
+     except ServerError as e:
+         raise TemporaryFailure(str(e)) from e
+     except NotFoundError as e:
+         raise NotFound(str(e)) from e
+
+
+ async def _render_content(
+     client: httpx.AsyncClient,
+     slug: str,
+     content_type: str,
+     include_date: bool = True,
+ ) -> str:
+     """Fetch and render content as gemtext with caching."""
+     cache_key = f"{content_type}:{slug}"
+
+     if cached := cache.get(cache_key, settings.cache_ttl_post):
+         return cached
+
+     try:
+         html = await fetch_html(client, slug)
+     except NotFoundError as e:
+         raise NotFound(str(e)) from e
+     except ServerError as e:
+         raise TemporaryFailure(str(e)) from e
+
+     title, date = extract_metadata(html)
+     content = html_to_gemtext(html)
+
+     template = templates.get_template("post.gmi")
+     gemtext = template.render(
+         title=title,
+         date=date if include_date else None,
+         content=content,
+         web_url=f"{settings.bearblog_url}/{slug}/",
+     )
+
+     cache.set(cache_key, gemtext)
+     return gemtext
+
+
+ @app.gemini("/")
+ async def index(request: Request) -> str:
+     """Landing page with recent posts and page links."""
+     feed = await _get_feed(request.app.state.client)
+
+     posts = []
+     for entry in feed.entries[:10]:
+         link = getattr(entry, "link", None)
+         if not link:
+             continue
+         slug = extract_slug(link)
+         if not slug:
+             continue
+         date = entry.get("published", "")[:16] if entry.get("published") else ""
+         title = getattr(entry, "title", "Untitled")
+         posts.append({"slug": slug, "title": title, "date": date})
+
+     template = templates.get_template("index.gmi")
+     return template.render(
+         blog_name=settings.blog_name,
+         description=feed.feed.get("description", ""),
+         pages=settings.pages,
+         posts=posts,
+     )
+
+
+ @app.gemini("/post/{slug}")
+ async def post(request: Request, slug: str) -> str:
+     """Individual blog post."""
+     return await _render_content(
+         request.app.state.client, slug, "post", include_date=True
+     )
+
+
+ @app.gemini("/page/{slug}")
+ async def page(request: Request, slug: str) -> str:
+     """Static page (projects, now, etc.)."""
+     return await _render_content(
+         request.app.state.client, slug, "page", include_date=False
+     )
+
+
+ @app.gemini("/about")
+ async def about(request: Request) -> str:
+     """About page from feed metadata."""
+     feed = await _get_feed(request.app.state.client)
+     description = feed.feed.get("description", "A personal blog.")
+
+     template = templates.get_template("about.gmi")
+     return template.render(
+         blog_name=settings.blog_name,
+         description=description,
+         bearblog_url=settings.bearblog_url,
+     )
+
+
+ def _rfc822_to_iso(date_str: str) -> str:
+     """Convert RFC 822 date to ISO 8601 format for Atom feeds."""
+     if not date_str:
+         # Use UTC explicitly; a naive local time must not be labeled "Z"
+         return datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+     try:
+         dt = parsedate_to_datetime(date_str)
+         return dt.isoformat().replace("+00:00", "Z")
+     except (ValueError, TypeError):
+         return datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+
+
+ @app.gemini("/feed")
+ async def feed(request: Request) -> Response:
+     """Atom feed with Gemini URLs."""
+     rss = await _get_feed(request.app.state.client)
+
+     # Use configured gemini_host or fall back to request hostname
+     host = settings.gemini_host or request.hostname or "localhost"
+     base_url = f"gemini://{host}"
+
+     # Get the most recent update time
+     updated = _rfc822_to_iso(rss.feed.get("updated", ""))
+
+     entries = []
+     for entry in rss.entries:
+         link = getattr(entry, "link", None)
+         if not link:
+             continue
+         slug = extract_slug(link)
+         if not slug:
+             continue
+
+         entries.append(
+             {
+                 "title": getattr(entry, "title", "Untitled"),
+                 "url": f"{base_url}/post/{slug}",
+                 "published": _rfc822_to_iso(entry.get("published", "")),
+                 "summary": getattr(entry, "description", ""),
+             }
+         )
+
+     template = xml_templates.get_template("feed.xml")
+     atom_xml = template.render(
+         blog_name=settings.blog_name,
+         base_url=base_url,
+         updated=updated,
+         entries=entries,
+     )
+
+     return Response(body=atom_xml, mime_type="application/atom+xml")
+
+
+ def main() -> None:
+     """Entry point."""
+     app.run(
+         host=settings.host,
+         port=settings.port,
+         certfile=settings.cert_file,
+         keyfile=settings.key_file,
+     )
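A quick worked example of the RFC 822 to ISO 8601 conversion that `_rfc822_to_iso` performs for the Atom feed (the date value here is illustrative):

```python
from email.utils import parsedate_to_datetime

# RFC 822 timestamp, as found in RSS "published" fields
dt = parsedate_to_datetime("Sat, 31 Jan 2026 12:00:00 GMT")
print(dt.isoformat().replace("+00:00", "Z"))  # -> 2026-01-31T12:00:00Z
```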
ursaproxy/cache.py ADDED
@@ -0,0 +1,37 @@
+ from time import time
+ from typing import Any
+
+
+ class Cache:
+     """Simple TTL cache using dict + timestamps with size limit."""
+
+     def __init__(self, max_size: int = 1000) -> None:
+         self._data: dict[str, tuple[Any, float]] = {}
+         self._max_size = max_size
+
+     def get(self, key: str, ttl: int) -> Any | None:
+         """Get value if it exists and has not expired."""
+         entry = self._data.get(key)
+         if entry:
+             value, timestamp = entry
+             if time() - timestamp < ttl:
+                 return value
+             # Use pop to avoid race conditions
+             self._data.pop(key, None)
+         return None
+
+     def set(self, key: str, value: Any) -> None:
+         """Set value with current timestamp."""
+         self._evict_if_full()
+         self._data[key] = (value, time())
+
+     def _evict_if_full(self) -> None:
+         """Remove oldest entries if cache is full."""
+         if len(self._data) >= self._max_size:
+             # Remove oldest 10% of entries
+             sorted_keys = sorted(self._data.keys(), key=lambda k: self._data[k][1])
+             for key in sorted_keys[: len(sorted_keys) // 10 or 1]:
+                 self._data.pop(key, None)
+
+
+ cache = Cache()
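Note that the TTL is supplied at read time rather than stored with the entry, so the same cached value can be fresh for one caller and stale for another. A small illustrative sketch (the key, values, and TTLs are hypothetical; the env vars are needed because importing the package builds its settings at import time):

```python
import os
from time import sleep

os.environ.setdefault("BEARBLOG_URL", "https://example.bearblog.dev")
os.environ.setdefault("BLOG_NAME", "Example Blog")

from ursaproxy.cache import Cache

c = Cache(max_size=100)
c.set("feed", "<rss/>")                   # timestamped when written
assert c.get("feed", ttl=60) == "<rss/>"  # fresh under a 60-second TTL
sleep(2)
assert c.get("feed", ttl=1) is None       # stale under a 1-second TTL; entry evicted on read
```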
ursaproxy/config.py ADDED
@@ -0,0 +1,39 @@
+ from pydantic import field_validator
+ from pydantic_settings import BaseSettings
+
+
+ class Settings(BaseSettings):
+     """Configuration from environment variables."""
+
+     # Required: the Bearblog URL to proxy
+     bearblog_url: str
+     blog_name: str
+
+     cache_ttl_feed: int = 300  # 5 minutes
+     cache_ttl_post: int = 1800  # 30 minutes
+
+     # Static pages (slug -> title) - pages not in RSS feed
+     # Override via PAGES='{"about": "About Me", "now": "Now"}'
+     pages: dict[str, str] = {}
+
+     # Gemini capsule hostname (for feed URLs)
+     # e.g., "gemini.example.com" -> gemini://gemini.example.com/post/...
+     gemini_host: str | None = None
+
+     # Server settings
+     host: str = "localhost"
+     port: int = 1965
+     cert_file: str | None = None
+     key_file: str | None = None
+
+     @field_validator("bearblog_url")
+     @classmethod
+     def normalize_url(cls, v: str) -> str:
+         """Remove trailing slash to prevent double slashes in URLs."""
+         v = v.rstrip("/")
+         if not v.startswith(("http://", "https://")):
+             raise ValueError("bearblog_url must start with http:// or https://")
+         return v
+
+
+ settings = Settings()  # type: ignore[call-arg]  # pydantic-settings reads from env
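pydantic-settings resolves each field from a same-named environment variable (case-insensitively) and parses complex fields such as `pages` from JSON. A hypothetical sketch; the variables must be set before the module is imported, because the `settings` singleton is built at import time:

```python
import os

os.environ["BEARBLOG_URL"] = "https://example.bearblog.dev/"
os.environ["BLOG_NAME"] = "Example Blog"
os.environ["PAGES"] = '{"now": "Now"}'

from ursaproxy.config import Settings

s = Settings()
assert s.bearblog_url == "https://example.bearblog.dev"  # validator stripped the trailing slash
assert s.pages == {"now": "Now"}
```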
ursaproxy/converter.py ADDED
@@ -0,0 +1,78 @@
+ from bs4 import BeautifulSoup
+ from markdownify import markdownify
+ from md2gemini import md2gemini
+
+
+ def html_to_gemtext(html: str) -> str:
+     """
+     Convert Bearblog HTML to Gemtext.
+
+     Bearblog structure:
+     - Content is in <main> element
+     - Title is <h1> (extracted separately)
+     - Date is in <time> element
+     - Nav/footer should be stripped
+     """
+     soup = BeautifulSoup(html, "html.parser")
+
+     # Bearblog uses <main> for content, not <article>
+     main = soup.find("main")
+     if not main:
+         main = soup.body
+
+     if not main:
+         return ""
+
+     # Remove elements we don't want
+     for tag in main.find_all(["script", "style", "nav", "footer", "form"]):
+         tag.decompose()
+
+     # Remove the h1 (title handled separately)
+     if h1 := main.find("h1"):
+         h1.decompose()
+
+     # HTML -> Markdown -> Gemtext
+     markdown = markdownify(str(main), heading_style="ATX")
+     gemtext = md2gemini(markdown, links="paragraph", plain=True)
+
+     return gemtext.strip()
+
+
+ def extract_metadata(html: str) -> tuple[str, str]:
+     """
+     Extract title and date from Bearblog HTML.
+
+     Returns: (title, date_str)
+     """
+     soup = BeautifulSoup(html, "html.parser")
+
+     # Title is the first h1
+     h1 = soup.find("h1")
+     title = h1.get_text(strip=True) if h1 else "Untitled"
+
+     # Date is in <time datetime="2026-01-31">
+     time_el = soup.find("time")
+     if time_el and time_el.get("datetime"):
+         date_str = time_el["datetime"]
+     elif time_el:
+         date_str = time_el.get_text(strip=True)
+     else:
+         date_str = ""
+
+     return title, date_str
+
+
+ def extract_slug(url: str) -> str:
+     """
+     Extract slug from Bearblog URL.
+
+     Input: "https://alanbato.com/el-internetsito/"
+     Output: "el-internetsito"
+     """
+     if not url:
+         return ""
+     path = url.rstrip("/").split("/")[-1]
+     # If it looks like a domain (has a dot), there's no slug
+     if "." in path:
+         return ""
+     return path
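An illustrative round trip through the converter on a toy Bearblog-shaped page (the exact gemtext depends on md2gemini's rendering; the env vars are needed because importing the package builds its settings at import time):

```python
import os

os.environ.setdefault("BEARBLOG_URL", "https://example.bearblog.dev")
os.environ.setdefault("BLOG_NAME", "Example Blog")

from ursaproxy.converter import extract_metadata, extract_slug, html_to_gemtext

html = """<html><body><main>
  <h1>Hello Gemini</h1>
  <time datetime="2026-01-31">Jan 31, 2026</time>
  <p>First post.</p>
</main></body></html>"""

print(extract_metadata(html))  # -> ('Hello Gemini', '2026-01-31')
print(html_to_gemtext(html))   # h1 removed; remaining <main> content as gemtext
print(extract_slug("https://alanbato.com/el-internetsito/"))  # -> 'el-internetsito'
```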
ursaproxy/fetcher.py ADDED
@@ -0,0 +1,53 @@
+ import feedparser
+ import httpx
+
+ from .config import settings
+
+
+ class FetchError(Exception):
+     """Base error for fetch operations."""
+
+
+ class NotFoundError(FetchError):
+     """Resource not found (404)."""
+
+
+ class ServerError(FetchError):
+     """Server or network error."""
+
+
+ async def _fetch(
+     client: httpx.AsyncClient, url: str, not_found_msg: str
+ ) -> httpx.Response:
+     """Fetch URL with standardized error handling."""
+     try:
+         response = await client.get(url)
+         if response.status_code == 404:
+             raise NotFoundError(not_found_msg)
+         if response.status_code >= 500:
+             raise ServerError(f"Server error {response.status_code}")
+         if response.status_code >= 400:
+             raise ServerError(f"HTTP error {response.status_code}")
+         return response
+     except httpx.HTTPStatusError as e:
+         raise ServerError(f"HTTP error: {e}") from e
+     except httpx.RequestError as e:
+         raise ServerError(f"Network error: {e}") from e
+
+
+ async def fetch_feed(client: httpx.AsyncClient) -> feedparser.FeedParserDict:
+     """Fetch RSS feed from Bearblog."""
+     url = f"{settings.bearblog_url}/feed/?type=rss"
+     response = await _fetch(client, url, f"Feed not found at {url}")
+     return feedparser.parse(response.text)
+
+
+ async def fetch_html(client: httpx.AsyncClient, slug: str) -> str:
+     """
+     Fetch HTML page from Bearblog.
+
+     Note: Bearblog URLs have trailing slashes: /{slug}/
+     """
+     url = f"{settings.bearblog_url}/{slug}/"
+     response = await _fetch(client, url, f"Page not found: {slug}")
+     return response.text
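A hedged sketch of calling the fetcher directly, showing how the status-code mapping surfaces as typed exceptions (the env values are illustrative and must be set before import, since the package builds its settings at import time):

```python
import asyncio
import os

os.environ.setdefault("BEARBLOG_URL", "https://example.bearblog.dev")
os.environ.setdefault("BLOG_NAME", "Example Blog")

import httpx

from ursaproxy.fetcher import NotFoundError, ServerError, fetch_feed


async def main() -> None:
    async with httpx.AsyncClient(timeout=30.0) as client:
        try:
            feed = await fetch_feed(client)
            print(len(feed.entries), "entries")
        except NotFoundError:
            print("feed missing (404)")
        except ServerError as e:
            print("upstream trouble:", e)


asyncio.run(main())
```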
ursaproxy/templates/about.gmi ADDED
@@ -0,0 +1,6 @@
+ # About {{ blog_name }}
+
+ {{ description }}
+
+ => / <- Back to index
+ => {{ bearblog_url }} Visit on the web
ursaproxy/templates/feed.xml ADDED
@@ -0,0 +1,18 @@
+ <?xml version="1.0" encoding="utf-8"?>
+ <feed xmlns="http://www.w3.org/2005/Atom">
+   <title>{{ blog_name }}</title>
+   <link href="{{ base_url }}/" rel="alternate"/>
+   <link href="{{ base_url }}/feed" rel="self"/>
+   <id>{{ base_url }}/</id>
+   <updated>{{ updated }}</updated>
+   {% for entry in entries %}
+   <entry>
+     <title>{{ entry.title }}</title>
+     <link href="{{ entry.url }}" rel="alternate"/>
+     <id>{{ entry.url }}</id>
+     <published>{{ entry.published }}</published>
+     <updated>{{ entry.published }}</updated>
+     <summary>{{ entry.summary }}</summary>
+   </entry>
+   {% endfor %}
+ </feed>
ursaproxy/templates/index.gmi ADDED
@@ -0,0 +1,15 @@
+ # {{ blog_name }}
+
+ {{ description }}
+
+ ## Pages
+ {% for slug, title in pages.items() %}
+ => /page/{{ slug }} {{ title }}
+ {% endfor %}
+
+ ## Recent Posts
+ => /feed Atom Feed
+
+ {% for post in posts %}
+ => /post/{{ post.slug }} {{ post.title }} ({{ post.date }})
+ {% endfor %}
ursaproxy/templates/post.gmi ADDED
@@ -0,0 +1,10 @@
+ # {{ title }}
+
+ {% if date %}Published: {{ date }}
+
+ {% endif %}
+ {{ content }}
+
+ ---
+ => / <- Back to index
+ => {{ web_url }} View on web
ursaproxy-0.1.2.dist-info/METADATA ADDED
@@ -0,0 +1,181 @@
+ Metadata-Version: 2.3
+ Name: ursaproxy
+ Version: 0.1.2
+ Summary: A Bearblog-to-Gemini proxy showcasing Xitzin
+ Author: Alan Velasco
+ Author-email: Alan Velasco <ursaproxy@alanbato.com>
+ License: MIT
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Dist: xitzin>=0.6.1
+ Requires-Dist: feedparser>=6.0.11
+ Requires-Dist: httpx>=0.27.0
+ Requires-Dist: beautifulsoup4>=4.12.3
+ Requires-Dist: markdownify>=0.12.1
+ Requires-Dist: md2gemini>=1.9.1
+ Requires-Dist: pydantic-settings>=2.0.0
+ Requires-Dist: jinja2>=3.1.0
+ Requires-Python: >=3.13
+ Description-Content-Type: text/markdown
+
+ # UrsaProxy
+
+ A Bearblog-to-Gemini proxy built with [Xitzin](https://github.com/alanbato/xitzin). It fetches content from a Bearblog RSS feed and HTML pages, converts it to Gemtext, and serves it over the Gemini protocol.
+
+ ## Features
+
+ - Proxies Bearblog content to the Gemini protocol
+ - Converts HTML to Gemtext via a Markdown intermediate format
+ - Generates Atom feeds with Gemini URLs
+ - Configurable TTL caching for feed and post data
+ - Support for static pages not in the RSS feed
+
+ ## Installation
+
+ Requires Python 3.13+.
+
+ ```bash
+ # Using uv
+ uv add ursaproxy
+
+ # Using pip
+ pip install ursaproxy
+ ```
+
+ ## Configuration
+
+ UrsaProxy is configured via environment variables:
+
+ ### Required
+
+ | Variable | Description |
+ |----------|-------------|
+ | `BEARBLOG_URL` | The Bearblog URL to proxy (e.g., `https://example.bearblog.dev`) |
+ | `BLOG_NAME` | Display name for the blog |
+ | `CERT_FILE` | Path to TLS certificate file |
+ | `KEY_FILE` | Path to TLS private key file |
+
+ ### Optional
+
+ | Variable | Default | Description |
+ |----------|---------|-------------|
+ | `PAGES` | `{}` | JSON dict of static pages `{"slug": "Title"}` |
+ | `GEMINI_HOST` | `None` | Hostname for Gemini URLs in feed |
+ | `CACHE_TTL_FEED` | `300` | Feed cache TTL in seconds (5 min) |
+ | `CACHE_TTL_POST` | `1800` | Post cache TTL in seconds (30 min) |
+ | `HOST` | `localhost` | Server bind address |
+ | `PORT` | `1965` | Server port (Gemini default) |
+
+ ### Example
+
+ ```bash
+ export BEARBLOG_URL="https://example.bearblog.dev"
+ export BLOG_NAME="My Gemini Blog"
+ export CERT_FILE="/path/to/cert.pem"
+ export KEY_FILE="/path/to/key.pem"
+ export PAGES='{"about": "About Me", "now": "What I am doing now"}'
+ export GEMINI_HOST="gemini.example.com"
+ ```
+
+ ## Usage
+
+ ```bash
+ ursaproxy
+ ```
+
+ The server will start on `gemini://localhost:1965/` by default.
+
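+ For a quick smoke test without a Gemini client, you can speak the protocol by hand: a request is just the absolute URL followed by CRLF over TLS. A minimal Python sketch (it disables certificate verification, since Gemini servers typically use self-signed certificates):
+
+ ```python
+ import socket
+ import ssl
+
+ # Self-signed certs are the norm on Gemini, so skip verification for this local test
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+
+ with socket.create_connection(("localhost", 1965)) as sock:
+     with ctx.wrap_socket(sock, server_hostname="localhost") as tls:
+         tls.sendall(b"gemini://localhost/\r\n")  # request = URL + CRLF
+         print(tls.recv(4096).decode())  # expect a "20 text/gemini" header, then gemtext
+ ```
+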
+ ### Routes
+
+ | Route | Description |
+ |-------|-------------|
+ | `/` | Landing page with recent posts and page links |
+ | `/post/{slug}` | Individual blog post with date |
+ | `/page/{slug}` | Static page (without date) |
+ | `/about` | About page from feed metadata |
+ | `/feed` | Atom feed with Gemini URLs |
+
+ ## Development
+
+ To contribute, clone the repository and install the dev dependencies:
+
+ ```bash
+ git clone https://github.com/alanbato/ursaproxy.git
+ cd ursaproxy
+ uv sync --group dev --group test
+ ```
+
+ ### Commands
+
+ ```bash
+ # Run linting
+ uv run ruff check .
+
+ # Run linting with auto-fix
+ uv run ruff check --fix .
+
+ # Format code
+ uv run ruff format .
+
+ # Type check
+ uv run ty check
+
+ # Run all pre-commit hooks
+ uv run pre-commit run --all-files
+
+ # Run tests
+ uv run pytest
+
+ # Run tests with verbose output
+ uv run pytest -v
+ ```
+
+ ### Project Structure
+
+ ```
+ src/ursaproxy/
+ ├── __init__.py       # Xitzin app, routes, and entry point
+ ├── config.py         # Pydantic settings for environment config
+ ├── fetcher.py        # HTTP client for fetching Bearblog content
+ ├── converter.py      # HTML -> Markdown -> Gemtext pipeline
+ ├── cache.py          # Simple TTL cache implementation
+ └── templates/        # Jinja2 templates
+     ├── index.gmi     # Landing page template
+     ├── post.gmi      # Post/page template
+     ├── about.gmi     # About page template
+     └── feed.xml      # Atom feed template
+ ```
+
+ ### Testing
+
+ The test suite uses pytest with fixtures for offline testing:
+
+ ```bash
+ # Run all 111 tests
+ uv run pytest
+
+ # Run specific test file
+ uv run pytest tests/test_converter.py
+
+ # Run with coverage (if installed)
+ uv run pytest --cov=ursaproxy
+ ```
+
+ HTTP requests are mocked using [respx](https://github.com/lundberg/respx), so tests run completely offline.
+
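+ A minimal sketch of what such a mock can look like with respx and pytest-asyncio (the slug, URL, and HTML body are illustrative, not taken from the test suite, and `BEARBLOG_URL` is assumed to point at `https://example.bearblog.dev`):
+
+ ```python
+ import os
+
+ # The package reads its settings at import time, so set these first
+ os.environ.setdefault("BEARBLOG_URL", "https://example.bearblog.dev")
+ os.environ.setdefault("BLOG_NAME", "Example Blog")
+
+ import httpx
+ import pytest
+ import respx
+
+ from ursaproxy.fetcher import fetch_html
+
+
+ @pytest.mark.asyncio
+ async def test_fetch_html_offline():
+     with respx.mock:
+         # Intercept the outgoing request; no network traffic happens
+         respx.get("https://example.bearblog.dev/hello/").mock(
+             return_value=httpx.Response(200, text="<main><h1>Hello</h1></main>")
+         )
+         async with httpx.AsyncClient() as client:
+             html = await fetch_html(client, "hello")
+     assert "<h1>Hello</h1>" in html
+ ```
+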
+ ## How It Works
+
+ 1. **Feed Fetching**: Fetches the RSS feed from `{BEARBLOG_URL}/feed/?type=rss`
+ 2. **HTML Fetching**: Fetches individual pages from `{BEARBLOG_URL}/{slug}/`
+ 3. **Conversion Pipeline** (sketched below):
+    - Parse HTML with BeautifulSoup
+    - Extract content from the `<main>` element
+    - Remove nav, footer, scripts, and styles
+    - Convert to Markdown with markdownify
+    - Convert to Gemtext with md2gemini
+ 4. **Caching**: Feed and posts are cached with configurable TTLs
+ 5. **Serving**: Content is served over the Gemini protocol by Xitzin
+
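+ A condensed sketch of step 3, using the same libraries the converter uses (the HTML snippet and printed result are illustrative; the exact output depends on md2gemini's rendering):
+
+ ```python
+ from bs4 import BeautifulSoup
+ from markdownify import markdownify
+ from md2gemini import md2gemini
+
+ html = "<main><h2>Notes</h2><p>Some <em>emphasized</em> text.</p></main>"
+
+ main = BeautifulSoup(html, "html.parser").find("main")        # extract <main>
+ markdown = markdownify(str(main), heading_style="ATX")        # HTML -> Markdown
+ gemtext = md2gemini(markdown, links="paragraph", plain=True)  # Markdown -> Gemtext
+ print(gemtext)  # roughly: "## Notes" followed by the paragraph as plain text
+ ```
+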
+ ## License
+
+ MIT
ursaproxy-0.1.2.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ ursaproxy/__init__.py,sha256=bO2ym_YzTMOUVBbqTjXWWI0ZQqRc81HnZ2o0JBkkZso,5777
+ ursaproxy/cache.py,sha256=Q5cypE91xGte1ph-_NqcEwZ0NoT7_LszYT1DVpfd2bM,1205
+ ursaproxy/config.py,sha256=mjyZPOtW7f_IhP4ukMj8TOHpyGaXfoSXmExYGv2Z-FI,1216
+ ursaproxy/converter.py,sha256=FAW0fA7a3WtlPYMtZbDlsM0Gl7twXK73DAV911R7SPI,1955
+ ursaproxy/fetcher.py,sha256=1Bsm96QYjnKQMorS2xwp9e3WRq8GbA6tKZrs1Z4ObOM,1591
+ ursaproxy/templates/about.gmi,sha256=hJxK25i9uXr2geBo1HrdkLztqkebICDy-_bdnQ4jlGI,105
+ ursaproxy/templates/feed.xml,sha256=0aiFNOfgHeMH0427LPc58pEf0NdvcDHIRa-x9tc_Ty8,597
+ ursaproxy/templates/index.gmi,sha256=2J-1lQRcwoxr46bqasW969RPpo_X5n2sh9sVuXIMdFg,265
+ ursaproxy/templates/post.gmi,sha256=DMPbqZl52v-G-6wmwbXh15kg8dYpTDUx3mfqkce-HhQ,133
+ ursaproxy-0.1.2.dist-info/WHEEL,sha256=iHtWm8nRfs0VRdCYVXocAWFW8ppjHL-uTJkAdZJKOBM,80
+ ursaproxy-0.1.2.dist-info/entry_points.txt,sha256=uM3A9bQS-p_6VhWbkFRtob2oN-SuBboJWS3ZwCjlAFk,46
+ ursaproxy-0.1.2.dist-info/METADATA,sha256=S1WUJKOfwq-hNgMslrbIzqN53x9cOosPRrVAc9lUTqM,4781
+ ursaproxy-0.1.2.dist-info/RECORD,,
ursaproxy-0.1.2.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: uv 0.9.30
+ Root-Is-Purelib: true
+ Tag: py3-none-any
ursaproxy-0.1.2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [console_scripts]
+ ursaproxy = ursaproxy:main
+