parsefood 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. parsefood-0.1.0/.env.example +24 -0
  2. parsefood-0.1.0/.github/dependabot.yml +8 -0
  3. parsefood-0.1.0/.github/workflows/release.yml +49 -0
  4. parsefood-0.1.0/.gitignore +60 -0
  5. parsefood-0.1.0/.pre-commit-config.yaml +5 -0
  6. parsefood-0.1.0/AGENTS.md +345 -0
  7. parsefood-0.1.0/CLAUDE.md +1 -0
  8. parsefood-0.1.0/LICENSE +21 -0
  9. parsefood-0.1.0/Makefile +5 -0
  10. parsefood-0.1.0/PKG-INFO +253 -0
  11. parsefood-0.1.0/README.md +213 -0
  12. parsefood-0.1.0/docs/pipeline.md +179 -0
  13. parsefood-0.1.0/food_database.json +1484 -0
  14. parsefood-0.1.0/food_log/__init__.py +89 -0
  15. parsefood-0.1.0/food_log/config.py +113 -0
  16. parsefood-0.1.0/food_log/database.py +267 -0
  17. parsefood-0.1.0/food_log/llm/__init__.py +22 -0
  18. parsefood-0.1.0/food_log/llm/client.py +36 -0
  19. parsefood-0.1.0/food_log/llm/consistency.py +514 -0
  20. parsefood-0.1.0/food_log/llm/extractor.py +141 -0
  21. parsefood-0.1.0/food_log/llm/prompts.py +243 -0
  22. parsefood-0.1.0/food_log/models.py +200 -0
  23. parsefood-0.1.0/food_log/processing/__init__.py +28 -0
  24. parsefood-0.1.0/food_log/processing/messages.py +339 -0
  25. parsefood-0.1.0/food_log/processing/nutrition.py +201 -0
  26. parsefood-0.1.0/food_log/processing/validation.py +166 -0
  27. parsefood-0.1.0/food_log/profile.py +147 -0
  28. parsefood-0.1.0/food_log/utils.py +179 -0
  29. parsefood-0.1.0/food_log/visualization/__init__.py +5 -0
  30. parsefood-0.1.0/food_log/visualization/plots.py +57 -0
  31. parsefood-0.1.0/logo.png +0 -0
  32. parsefood-0.1.0/main.py +558 -0
  33. parsefood-0.1.0/process_labels.py +366 -0
  34. parsefood-0.1.0/profiles/template.yaml.example +16 -0
  35. parsefood-0.1.0/prompts/_food_list.txt +149 -0
  36. parsefood-0.1.0/pyproject.toml +66 -0
  37. parsefood-0.1.0/review_database.py +815 -0
  38. parsefood-0.1.0/s3_storage.py +133 -0
  39. parsefood-0.1.0/scrape_food.py +198 -0
  40. parsefood-0.1.0/scrapers/__init__.py +39 -0
  41. parsefood-0.1.0/scrapers/base.py +45 -0
  42. parsefood-0.1.0/scrapers/celeiro.py +236 -0
  43. parsefood-0.1.0/scrapers/continente.py +303 -0
  44. parsefood-0.1.0/scrapers/llm_extraction.py +80 -0
  45. parsefood-0.1.0/scrapers/models.py +33 -0
  46. parsefood-0.1.0/scrapers/pingo_doce.py +311 -0
  47. parsefood-0.1.0/scrapers/registry.py +50 -0
  48. parsefood-0.1.0/scripts/compare_migration.py +628 -0
  49. parsefood-0.1.0/scripts/download_telegram_messages.py +230 -0
  50. parsefood-0.1.0/scripts/telegram_sync.py +222 -0
  51. parsefood-0.1.0/scripts/test_new_system.py +153 -0
  52. parsefood-0.1.0/telegram_bot.py +215 -0
  53. parsefood-0.1.0/uv.lock +1885 -0
@@ -0,0 +1,24 @@
1
+ MODEL_ID=google/gemini-2.5-flash
2
+ OPENROUTER_API_KEY=<openrouter_api_key>
3
+ DATA_PATH=<data_path>
4
+ TARGET_CALORIES=<target_calories>
5
+
6
+ # Telegram API Configuration (for automated message download)
7
+ # Get your API credentials from: https://my.telegram.org/apps
8
+ TELEGRAM_API_ID=<telegram_api_id>
9
+ TELEGRAM_API_HASH=<telegram_api_hash>
10
+ TELEGRAM_PHONE=<telegram_phone>
11
+ TELEGRAM_CHAT=<telegram_chat>
12
+ TELEGRAM_OUTPUT_FILE=data/result.json
13
+
14
+ # Telegram Bot Configuration (for sending summaries)
15
+ # Create a bot via @BotFather to get the token
16
+ # Get your chat ID by messaging @userinfobot
17
+ TELEGRAM_BOT_TOKEN=<bot_token>
18
+ TELEGRAM_CHAT_ID=<chat_id>
19
+
20
+ # AWS S3 Configuration (for syncing data files)
21
+ # Set S3_ENABLED=true to enable S3 sync, false to use local files only
22
+ S3_ENABLED=false
23
+ S3_BUCKET=<s3-bucket-name>
24
+ S3_REGION=<s3-region-name>
@@ -0,0 +1,8 @@
1
+ # Dependabot configuration for automated dependency updates
2
+ # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates
3
+ version: 2
4
+ updates:
5
+ - package-ecosystem: "pip"
6
+ directory: "/"
7
+ schedule:
8
+ interval: "weekly"
@@ -0,0 +1,49 @@
1
+ name: Release
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ paths:
7
+ - "food_log/__init__.py"
8
+
9
+ jobs:
10
+ test:
11
+ uses: tsilva/.github/.github/workflows/test.yml@main
12
+
13
+ pii-scan:
14
+ uses: tsilva/.github/.github/workflows/pii-scan.yml@main
15
+
16
+ publish:
17
+ needs: [test, pii-scan]
18
+ runs-on: ubuntu-latest
19
+ environment: pypi
20
+ permissions:
21
+ contents: write
22
+ id-token: write
23
+ steps:
24
+ - uses: actions/checkout@v4
25
+
26
+ - uses: actions/setup-python@v5
27
+ with:
28
+ python-version: "3.12"
29
+
30
+ - name: Install build tools
31
+ run: pip install hatch
32
+
33
+ - name: Build package
34
+ run: hatch build
35
+
36
+ - name: Get version
37
+ id: version
38
+ run: echo "version=$(hatch version)" >> "$GITHUB_OUTPUT"
39
+
40
+ - name: Create GitHub Release
41
+ uses: softprops/action-gh-release@v2
42
+ with:
43
+ tag_name: v${{ steps.version.outputs.version }}
44
+ name: v${{ steps.version.outputs.version }}
45
+ generate_release_notes: true
46
+ files: dist/*
47
+
48
+ - name: Publish to PyPI
49
+ uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,60 @@
1
+ # >>> MANAGED BY GITGUARD - DO NOT EDIT THIS SECTION <<<
2
+ .claude/*.local.json
3
+ .claude/*.local.json.bak
4
+ .claude-sandbox.json
5
+ .mcp.json
6
+ .env
7
+ .env.*
8
+ !.env.example
9
+ !.env.*.example
10
+ .env.*.local
11
+ .env.local
12
+ *.pem
13
+ *.key
14
+ *.p12
15
+ *.pfx
16
+ *.gpg
17
+ *.secret
18
+ *-credentials.json
19
+ service-account*.json
20
+ credentials.json
21
+ secrets.json
22
+ .secrets/
23
+ .aws/
24
+ .ssh/
25
+ config.local.*
26
+ .DS_Store
27
+ Thumbs.db
28
+ .idea/
29
+ .vscode/
30
+ *.swp
31
+ *.swo
32
+ *.code-workspace
33
+ __pycache__/
34
+ *.py[cod]
35
+ .venv/
36
+ venv/
37
+ env/
38
+ *.egg-info/
39
+ node_modules/
40
+ .npm/
41
+ npm-debug.log*
42
+ yarn-debug.log*
43
+ yarn-error.log*
44
+ dist/
45
+ build/
46
+ logs/
47
+ *.log
48
+ # >>> END MANAGED <<<
49
+
50
+ # Project-specific rules
51
+ !profiles/_*.yaml
52
+ *.egg-info
53
+ __pycache__
54
+ data
55
+ food_database.backup_*.json
56
+ labels
57
+ logs
58
+ profiles/*.yaml
59
+ skills/
60
+ telegram_session.session
@@ -0,0 +1,5 @@
1
+ repos:
2
+ - repo: https://github.com/tsilva/.github
3
+ rev: main
4
+ hooks:
5
+ - id: gitleaks
@@ -0,0 +1,345 @@
1
+ # AGENTS.md
2
+
3
+ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4
+
5
+ ## Project Overview
6
+
7
+ Food Log Parser is a Python tool that extracts and analyzes food logs from Telegram exports. It uses LLMs (via OpenRouter API) to parse Portuguese food entries, calculate calories, and generate visualizations of calorie intake over time.
8
+
9
+ ## Package Structure
10
+
11
+ ```
12
+ parsefood/
13
+ ├── food_log/ # Main package
14
+ │ ├── __init__.py # Package exports
15
+ │ ├── config.py # Centralized configuration (paths, units, dates)
16
+ │ ├── profile.py # Profile configuration for multi-user support
17
+ │ ├── models.py # Pydantic models (FoodEntry, FoodLogEntry, etc.)
18
+ │ ├── database.py # Database operations (load/save/backup/merge)
19
+ │ ├── utils.py # Shared utilities (normalize_name, normalize_unit, etc.)
20
+ │ ├── llm/
21
+ │ │ ├── __init__.py
22
+ │ │ ├── client.py # OpenRouter client factory
23
+ │ │ ├── extractor.py # FoodDataExtractor class
24
+ │ │ └── prompts.py # Prompt templates
25
+ │ ├── processing/
26
+ │ │ ├── __init__.py
27
+ │ │ ├── nutrition.py # NutritionCalculator
28
+ │ │ ├── messages.py # Message loading and grouping
29
+ │ │ └── validation.py # Data validation
30
+ │ └── visualization/
31
+ │ ├── __init__.py
32
+ │ └── plots.py # Calorie plotting
33
+ ├── profiles/ # User profile configurations
34
+ │ ├── _template.yaml # Template for new profiles
35
+ │ └── cristina.yaml # Cristina's profile
36
+ ├── scrapers/ # Web scrapers for food data
37
+ ├── scripts/ # Utility scripts
38
+ ├── main.py # Entry point
39
+ ├── process_labels.py # Vision AI label processor
40
+ └── scrape_food.py # Web scraper CLI
41
+ ```
42
+
43
+ ### Import Patterns
44
+
45
+ ```python
46
+ # Import from food_log package
47
+ from food_log import UNIT_MAP, FOOD_DATABASE_PATH, normalize_unit
48
+ from food_log.models import FoodEntry, FoodLogEntry
49
+ from food_log.database import load_database, save_database, create_backup
50
+ from food_log.llm import FoodDataExtractor, create_openrouter_client
51
+ from food_log.processing import NutritionCalculator
52
+ from food_log.visualization import plot_calories
53
+ ```
54
+
55
+ ## Core Architecture
56
+
57
+ ### Data Processing Pipeline
58
+
59
+ The application follows a multi-stage pipeline:
60
+
61
+ 1. **Message Extraction**: Reads Telegram JSON export (`data/result.json`) and filters messages from a specific sender
62
+ 2. **Date Grouping**: Groups messages by date, applying blacklists and date rewrites for specific message IDs
63
+ 3. **LLM Processing**: Uses OpenRouter API with configurable model (default: Gemini 2.5 Flash) to extract structured food data from Portuguese text
64
+ 4. **Validation**: Uses Pydantic models (`FoodEntry`, `FoodLogEntry`) for data validation and structure
65
+ 5. **Storage**: Saves processed data to `data/date_messages_map.json` with both legacy format (semicolon-separated) and original messages
66
+ 6. **Visualization**: Generates polynomial regression plot of calorie trends (`data/plot.png`)
67
+
68
+ ### Key Data Structures
69
+
70
+ **FoodEntry**: Single food item with fields:
71
+ - `ingredient`: Food name (lowercase, stripped)
72
+ - `quantity`: Positive float
73
+ - `unit`: Portuguese unit (e.g., "colher de sopa", "unidade", "copo")
74
+ - `calories`: Non-negative integer
75
+
76
+ **FoodLogEntry**: Daily log containing:
77
+ - `date`: YYYY-MM-DD format
78
+ - `raw_text`: Original message
79
+ - `foods`: List of FoodEntry objects
80
+ - `total_calories`: Auto-calculated sum
81
+
82
+ **date_messages_map.json structure**:
83
+ ```json
84
+ {
85
+ "YYYY-MM-DD": {
86
+ "original": ["raw message 1", "raw message 2"],
87
+ "processed": ["ingredient;quantity;unit;calories", ...],
88
+ "total": total_calories_int
89
+ }
90
+ }
91
+ ```
92
+
93
+ ### LLM Integration
94
+
95
+ The `FoodDataExtractor` class handles LLM interaction:
96
+ - Connects via OpenAI-compatible client to OpenRouter
97
+ - Uses `food_database.json` as the structured nutrition database
98
+ - Extracts JSON array from LLM responses with regex fallback
99
+ - Validates and coerces data types to match Pydantic models
100
+ - Falls back to retry with data type fixes on validation errors
101
+ - Works in conjunction with `NutritionCalculator` for deterministic calorie calculations
102
+
103
+ ### Configuration
104
+
105
+ Environment variables (`.env` file):
106
+ - `MODEL_ID`: OpenRouter model identifier (e.g., "google/gemini-2.5-flash")
107
+ - `OPENROUTER_API_KEY`: API key for OpenRouter
108
+ - Optional: `TARGET_CALORIES`, `DATA_PATH` (defaults used when no profile)
109
+
110
+ Centralized configuration in `food_log/config.py`:
111
+ - `START_DATE`: Messages before this date are ignored (currently 2023-08-30)
112
+ - `BLACKLISTED_IDS`: Message IDs to skip during processing
113
+ - `REWRITE_ID_DATES`: Message IDs with corrected timestamps
114
+ - `UNIT_MAP`: Portuguese to standard unit mapping (e.g., "colher de sopa" → "tbsp")
115
+ - `DATA_PATH`, `FOOD_DATABASE_PATH`: File paths
116
+
117
+ ### Profiles
118
+
119
+ Multi-user support via YAML profile files in `profiles/` directory:
120
+
121
+ ```yaml
122
+ # profiles/cristina.yaml
123
+ name: "Cristina"
124
+ sender_name: "Cristina" # Filter Telegram messages by sender
125
+ data_path: "/path/to/data"
126
+
127
+ # Optional settings (defaults shown)
128
+ start_date: "2023-08-30"
129
+ target_calories: 2000
130
+ blacklisted_ids: [2842, 2849]
131
+ rewrite_id_dates:
132
+ 7071: "2024-06-23T07:22:47"
133
+ ```
134
+
135
+ **Key points:**
136
+ - `food_database.json` remains **global/shared** (not per-profile)
137
+ - Running without `--profile` uses environment defaults (backward compatible)
138
+ - Profile files starting with `_` are excluded (templates)
139
+
140
+ Configuration priority:
141
+ 1. Profile YAML settings (if `--profile` specified)
142
+ 2. Environment variables
143
+ 3. Code defaults
144
+
145
+ Profile-related functions:
146
+ ```python
147
+ from food_log import list_profiles, load_profile, get_runtime_config
148
+
149
+ # List available profiles
150
+ profiles = list_profiles() # ['cristina']
151
+
152
+ # Load and use a profile
153
+ profile = load_profile('cristina')
154
+ config = get_runtime_config(profile)
155
+
156
+ # Use default config (no profile)
157
+ config = get_runtime_config()
158
+ ```
159
+
160
+ ### Utility Scripts
161
+
162
+ **scripts/test_new_system.py**: Integration test script for the food database system. Tests both LLM extraction and nutrition calculation.
163
+
164
+ **scripts/compare_migration.py**: Validation and comparison tool with two modes:
165
+ - `--quick`: Fast validation by recalculating from existing processed entries (no LLM)
166
+ - Default: Full regeneration using LLM, generates markdown reports for discrepancies
167
+
168
+ **process_labels.py**: Utility for adding new foods to the database by processing product nutrition label images using vision AI. Supports multiple image formats and creates timestamped backups.
169
+
170
+ **scrape_food.py**: Web scraper for adding foods from product URLs. Uses Playwright for JavaScript-rendered pages. Currently supports Continente.pt.
171
+
172
+ ## Web Scraping
173
+
174
+ **IMPORTANT**: When asked to scrape a URL or add a food from a product page, ALWAYS use the `scrape_food.py` CLI tool instead of ad-hoc scraping. The scraper handles JavaScript rendering, nutrition extraction, and database integration.
175
+
176
+ ### Usage
177
+
178
+ ```bash
179
+ # Scrape and display (dry-run)
180
+ uv run python scrape_food.py URL
181
+
182
+ # Scrape and add to database
183
+ uv run python scrape_food.py --add URL
184
+
185
+ # Scrape with custom name
186
+ uv run python scrape_food.py --add --name "custom name" URL
187
+
188
+ # Validate against existing entry
189
+ uv run python scrape_food.py --validate --name "existing name" URL
190
+
191
+ # Overwrite existing entry
192
+ uv run python scrape_food.py --add --overwrite --name "name" URL
193
+
194
+ # List supported sites
195
+ uv run python scrape_food.py --list-scrapers
196
+ ```
197
+
198
+ ### Supported Sites
199
+
200
+ - **Continente.pt**: Extracts calories, proteins, carbs, fats, fiber, sodium, and unit weight from product pages
201
+
202
+ ### Adding New Scrapers
203
+
204
+ To add support for a new store:
205
+ 1. Create `scrapers/newstore.py` implementing the `FoodScraper` protocol
206
+ 2. Register in `scrape_food.py`: `registry.register(NewStoreScraper())`
207
+
208
+ ## Development Commands
209
+
210
+ ### Running the Main Application
211
+
212
+ ```bash
213
+ # List available profiles
214
+ uv run python main.py --list-profiles
215
+
216
+ # Process with a specific profile
217
+ uv run python main.py --profile cristina
218
+
219
+ # Process without profile (uses DATA_PATH env var)
220
+ uv run python main.py
221
+ ```
222
+
223
+ This will:
224
+ 1. Backup `result.json` to `backups/result_TIMESTAMP.json`
225
+ 2. Sync latest messages from Telegram to `result.json`
226
+ 3. Backup `date_messages_map.json` to `backups/date_messages_map_TIMESTAMP.json`
227
+ 4. Load and process any new or modified dates using the LLM
228
+ 5. Save results to `date_messages_map.json`
229
+ 6. Generate calorie trend plot at `plot.png`
230
+
231
+ ### Quick Daily Status
232
+
233
+ ```bash
234
+ # Show today's food log with progress (read-only, fast)
235
+ uv run python main.py --profile cristina --today
236
+
237
+ # Without profile (uses defaults)
238
+ uv run python main.py --today
239
+ ```
240
+
241
+ ### Validation and Inspection
242
+
243
+ ```bash
244
+ # Validate all data in the processed file
245
+ uv run python main.py validate data/date_messages_map.json
246
+
247
+ # Inspect a specific date's data
248
+ uv run python main.py validate data/date_messages_map.json 2024-03-27
249
+ ```
250
+
251
+ ### Testing and Validation
252
+
253
+ ```bash
254
+ # Test the food database system
255
+ uv run python scripts/test_new_system.py
256
+
257
+ # Quick validation: recalculate and compare calorie values (no LLM)
258
+ uv run python scripts/compare_migration.py --quick -n 50
259
+ ```
260
+
261
+ ### Setup
262
+
263
+ ```bash
264
+ # Install uv (if not already installed)
265
+ curl -LsSf https://astral.sh/uv/install.sh | sh
266
+
267
+ # Install dependencies
268
+ uv sync
269
+
270
+ # Configure environment
271
+ cp .env.example .env
272
+ # Edit .env with your OPENROUTER_API_KEY and MODEL_ID
273
+ ```
274
+
275
+ Note: This project uses `uv` for dependency management. Dependencies are defined in `pyproject.toml`. The `uv sync` command will create a virtual environment and install all dependencies automatically.
276
+
277
+ ## Important Implementation Details
278
+
279
+ ### Message Processing Logic
280
+
281
+ - Messages are filtered by `sender_name` from profile (or "Cristina" by default)
282
+ - Existing processed dates are NOT reprocessed unless the original message text changes
283
+ - Processing uses 2 workers by default to avoid rate limits
284
+ - Both `result.json` and `date_messages_map.json` are backed up to `backups/` directory with timestamps before processing
285
+
286
+ ### Legacy Format Support
287
+
288
+ The system maintains backward compatibility with semicolon-separated format:
289
+ ```
290
+ ingredient;quantity;unit;calories
291
+ ```
292
+
293
+ The `parse_legacy_format()` function can read this format and convert to `FoodEntry` objects.
294
+
295
+ ### Validation and Migration
296
+
297
+ `validate_and_migrate_existing_data()` validates existing processed data without modifying the stored format. It only logs successful validations without storing `structured_data` field.
298
+
299
+ ### Portuguese Unit Handling
300
+
301
+ Common Portuguese units in the food list:
302
+ - "colher de sopa" (tbsp) = tablespoon
303
+ - "colher de chá" (tsp) = teaspoon
304
+ - "copo" or "tigela" = cup
305
+ - "unidade" = unit (individual item)
306
+ - "fatia" = slice
307
+ - "lata" = can
308
+
309
+ ### Food Database
310
+
311
+ The application uses a structured JSON database (`food_database.json`) containing:
312
+ - Portuguese food names as keys
313
+ - Nutrition per 100g (calories, proteins, carbs, fats)
314
+ - Unit conversions (grams_per_unit for tbsp, tsp, cup, unit, g)
315
+
316
+ New foods can be added using `process_labels.py` which extracts nutrition info from product label images using vision AI.
317
+
318
+ **Legacy**: `prompts/_food_list.txt` contains an old flat-file format for reference only. The active system uses `food_database.json`.
319
+
320
+ ## Testing Strategy
321
+
322
+ When adding new functionality:
323
+ 1. Test with real Telegram export data in `data/result.json`
324
+ 2. Use validation mode to check data integrity
325
+ 3. Inspect specific dates that might have issues
326
+ 4. Verify plot generation produces reasonable visualizations
327
+ 5. Check that backups are created properly before processing
328
+
329
+ ## Data Files
330
+
331
+ - `data/result.json`: Telegram export (synced from Telegram API or user-provided)
332
+ - `data/date_messages_map.json`: Main processed data store
333
+ - `data/backups/`: Timestamped backups of result.json and date_messages_map.json
334
+ - `data/plot.png`: Generated calorie trend visualization
335
+ - `food_database.json`: Structured nutrition database (active)
336
+ - `prompts/_food_list.txt`: Legacy calorie reference (kept for reference only)
337
+ - `docs/pipeline.md`: Detailed pipeline documentation (keep updated)
338
+
339
+ ## Documentation Maintenance
340
+
341
+ **Important**: When modifying the data processing pipeline in `main.py`, update `docs/pipeline.md` to reflect the changes. This includes:
342
+ - Adding/removing/reordering pipeline steps
343
+ - Changing data flow between components
344
+ - Modifying file formats or storage locations
345
+ - Adding new integrations (APIs, storage backends, etc.)
@@ -0,0 +1 @@
1
+ AGENTS.md
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 tsilva
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,5 @@
1
+ release-%:
2
+ hatch version $*
3
+ git add food_log/__init__.py
4
+ git commit -m "chore: release $$(hatch version)"
5
+ git push