flutter-pro-max-cli 2.0.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/assets/scripts/__pycache__/core.cpython-312.pyc +0 -0
- package/assets/scripts/__pycache__/design_system.cpython-312.pyc +0 -0
- package/assets/scripts/core.py +52 -45
- package/assets/scripts/design_system.py +1074 -0
- package/assets/scripts/search.py +72 -1
- package/assets/templates/platforms/agent.json +1 -1
- package/assets/templates/platforms/claude.json +3 -3
- package/assets/templates/platforms/codebuddy.json +12 -9
- package/assets/templates/platforms/codex.json +6 -6
- package/assets/templates/platforms/continue.json +5 -5
- package/assets/templates/platforms/copilot.json +6 -6
- package/assets/templates/platforms/cursor.json +5 -5
- package/assets/templates/platforms/gemini.json +6 -6
- package/assets/templates/platforms/kiro.json +5 -5
- package/assets/templates/platforms/opencode.json +6 -6
- package/assets/templates/platforms/qoder.json +11 -8
- package/assets/templates/platforms/roocode.json +6 -6
- package/assets/templates/platforms/trae.json +6 -6
- package/assets/templates/platforms/windsurf.json +6 -6
- package/package.json +1 -1
package/README.md
CHANGED
@@ -180,6 +180,18 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 
 ---
 
+## 📝 Changelog
+
+### v2.1.0 (2026-01-27)
+- **Type Safety**: Full Python type hints for Pylance strict mode
+- **Python 3.10+**: Minimum Python version updated
+- **Code Quality**: Removed unused imports, fixed linter warnings
+
+### v2.0.0
+- First release with support for 14 AI assistants
+
+---
+
 <div align="center">
 
 **Streamline your Flutter development with AI-powered architectural intelligence.**
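The v2.1.0 entries above go together: the annotation style introduced in `core.py` (PEP 604 unions such as `str | None`, parameterized built-ins such as `dict[str, float]`) is evaluated at import/definition time, which is what pushes the minimum supported Python to 3.10 unless `from __future__ import annotations` were used instead. A minimal sketch of the two spellings (illustrative only, not code from the package):

```python
from typing import Dict, List, Optional


def old_style(boost_col: Optional[str] = None) -> List[Dict[str, str]]:
    """Pre-3.10 spelling: Optional/List/Dict imported from typing."""
    return []


def new_style(boost_col: str | None = None) -> list[dict[str, str]]:
    """3.10+ spelling used in core.py 2.1.0: built-in generics and PEP 604 unions."""
    return []
```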
package/assets/scripts/__pycache__/core.cpython-312.pyc
Binary file

package/assets/scripts/__pycache__/design_system.cpython-312.pyc
Binary file
package/assets/scripts/core.py
CHANGED
@@ -10,9 +10,10 @@ import re
 from pathlib import Path
 from math import log
 from collections import defaultdict
+from typing import Any
 
 # ============ CONFIGURATION ============
-def _get_data_dir():
+def _get_data_dir() -> Path:
     """Auto-detect data directory based on script location"""
     script_dir = Path(__file__).parent
     possible_paths = [
@@ -38,7 +39,7 @@ DATA_DIR = _get_data_dir()
 MAX_RESULTS = 5
 
 # Domain configuration: file, search columns, output columns
-CSV_CONFIG = {
+CSV_CONFIG: dict[str, dict[str, str | list[str]]] = {
     "widget": {
         "file": "widget.csv",
         "search_cols": ["Widget Name", "Category", "Description", "Key Properties", "Usage Context & Pro-Tips"],
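Because `CSV_CONFIG` values are now typed as `dict[str, str | list[str]]`, a strict checker such as Pylance can no longer assume `config["file"]` is a `str` or that `config["search_cols"]` is a `list[str]`; that is why later hunks wrap the filename in `str(...)` and guard the column lists with `isinstance(...)`. A small self-contained sketch of the narrowing pattern (the toy config below is illustrative, not the package's full data):

```python
CONFIG: dict[str, dict[str, str | list[str]]] = {
    "widget": {
        "file": "widget.csv",
        "search_cols": ["Widget Name", "Category"],
    },
}

entry = CONFIG["widget"]
filename = str(entry["file"])       # value is str | list[str]; str() keeps the checker happy
search_cols = entry["search_cols"]
if isinstance(search_cols, list):   # isinstance() narrows str | list[str] down to list[str]
    print(filename, search_cols)
```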
@@ -141,49 +142,49 @@ AVAILABLE_STACKS = list(STACK_EXCLUSIONS.keys())
 class BM25:
     """BM25 ranking algorithm for text search - zero dependencies"""
 
-    def __init__(self, k1=1.5, b=0.75):
+    def __init__(self, k1: float = 1.5, b: float = 0.75) -> None:
         self.k1 = k1
         self.b = b
-        self.corpus = []
-        self.doc_lengths = []
-        self.avgdl = 0
-        self.idf = {}
-        self.doc_freqs = defaultdict(int)
-        self.
-
-    def tokenize(self, text):
+        self.corpus: list[list[str]] = []
+        self.doc_lengths: list[int] = []
+        self.avgdl: float = 0
+        self.idf: dict[str, float] = {}
+        self.doc_freqs: defaultdict[str, int] = defaultdict(int)
+        self.n: int = 0
+
+    def tokenize(self, text: str) -> list[str]:
         """Lowercase, split, remove punctuation, filter short words"""
         text = re.sub(r'[^\w\s]', ' ', str(text).lower())
         return [w for w in text.split() if len(w) > 1]
 
-    def fit(self, documents):
+    def fit(self, documents: list[str]) -> None:
         """Build BM25 index from documents"""
         self.corpus = [self.tokenize(doc) for doc in documents]
-        self.
-        if self.
+        self.n = len(self.corpus)
+        if self.n == 0:
             return
         self.doc_lengths = [len(doc) for doc in self.corpus]
-        self.avgdl = sum(self.doc_lengths) / self.
+        self.avgdl = sum(self.doc_lengths) / self.n
 
         for doc in self.corpus:
-            seen = set()
+            seen: set[str] = set()
             for word in doc:
                 if word not in seen:
                     self.doc_freqs[word] += 1
                     seen.add(word)
 
         for word, freq in self.doc_freqs.items():
-            self.idf[word] = log((self.
+            self.idf[word] = log((self.n - freq + 0.5) / (freq + 0.5) + 1)
 
-    def score(self, query):
+    def score(self, query: str) -> list[tuple[int, float]]:
         """Score all documents against query"""
         query_tokens = self.tokenize(query)
-        scores = []
+        scores: list[tuple[int, float]] = []
 
         for idx, doc in enumerate(self.corpus):
-            score = 0
+            score: float = 0.0
             doc_len = self.doc_lengths[idx]
-            term_freqs = defaultdict(int)
+            term_freqs: defaultdict[str, int] = defaultdict(int)
             for word in doc:
                 term_freqs[word] += 1
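The BM25 changes are annotations plus the renamed document counter (`self.n`); the index still uses the standard smoothed IDF, `log((n - df + 0.5) / (df + 0.5) + 1)`, visible in the hunk. Assuming the public surface shown here (`fit()` indexes a list of strings, `score()` returns one `(doc_index, score)` pair per indexed document) and that the script directory is importable, usage would look roughly like this sketch with made-up documents:

```python
from core import BM25  # assumes assets/scripts is on sys.path

docs = [
    "ListView builder for long scrollable lists",
    "GridView lays out widgets in a two-dimensional grid",
    "TextField collects free-form user text input",
]

bm25 = BM25()                                 # k1=1.5, b=0.75 defaults from the diff
bm25.fit(docs)                                # tokenize, count document frequencies, compute IDF
ranked = bm25.score("scrollable list view")   # -> list[tuple[int, float]]
best_idx, best_score = max(ranked, key=lambda pair: pair[1])
print(docs[best_idx], round(best_score, 3))
```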
@@ -201,13 +202,13 @@ class BM25:
 
 
 # ============ HELPER FUNCTIONS ============
-def _load_csv(filepath):
+def _load_csv(filepath: Path) -> list[dict[str, str]]:
     """Load CSV and return list of dicts"""
     with open(filepath, 'r', encoding='utf-8') as f:
         return list(csv.DictReader(f))
 
 
-def _search_csv(filepath, search_cols, output_cols, query, max_results, boost_col=None, boost_query=None):
+def _search_csv(filepath: Path, search_cols: list[str], output_cols: list[str], query: str, max_results: int, boost_col: str | None = None, boost_query: str | None = None) -> list[dict[str, Any]]:
     """Core search function using BM25 with optional boosting"""
     if not filepath.exists():
         return []
@@ -225,28 +226,29 @@ def _search_csv(filepath, search_cols, output_cols, query, max_results, boost_co
     # Apply boosting if specified (widget name match, etc.)
     if boost_col and boost_query:
         boost_query_lower = boost_query.lower()
-        boosted = []
+        boosted: list[tuple[int, float]] = []
         for idx, score in ranked:
+            boosted_score = score
             if score > 0:
                 boost_value = str(data[idx].get(boost_col, "")).lower()
                 if boost_value in boost_query_lower or boost_query_lower in boost_value:
-                    score
-            boosted.append((idx,
+                    boosted_score = score * 2.0  # Double score for exact/partial match
+            boosted.append((idx, boosted_score))
         ranked = sorted(boosted, key=lambda x: x[1], reverse=True)
 
     # Get top results with score > 0
-    results = []
+    results: list[dict[str, Any]] = []
     for idx, score in ranked[:max_results]:
         if score > 0:
             row = data[idx]
-            result = {col: row.get(col, "") for col in output_cols if col in row}
+            result: dict[str, Any] = {col: row.get(col, "") for col in output_cols if col in row}
             result["_score"] = round(score, 4)
             results.append(result)
 
     return results
 
 
-def detect_domain(query):
+def detect_domain(query: str) -> str:
     """Auto-detect the most relevant domain from query keywords"""
     query_lower = query.lower()
 
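In the rewritten boosting block, every ranked row is re-appended with `boosted_score`, which defaults to the original BM25 score and is doubled only when the boost column value and the query overlap. The same logic, extracted into a standalone snippet on toy rows (names and scores are made up):

```python
ranked = [(0, 1.2), (1, 0.8), (2, 0.0)]
rows = [{"Widget Name": "ListView"}, {"Widget Name": "GridView"}, {"Widget Name": "TextField"}]
boost_query_lower = "listview builder"

boosted: list[tuple[int, float]] = []
for idx, score in ranked:
    boosted_score = score
    if score > 0:
        boost_value = str(rows[idx].get("Widget Name", "")).lower()
        if boost_value in boost_query_lower or boost_query_lower in boost_value:
            boosted_score = score * 2.0  # partial/exact name match doubles the score
    boosted.append((idx, boosted_score))

print(sorted(boosted, key=lambda x: x[1], reverse=True))
# [(0, 2.4), (1, 0.8), (2, 0.0)] -- only "ListView" matched the boost query
```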
@@ -267,13 +269,13 @@ def detect_domain(query):
         "prompt": ["prompt", "ai", "css", "tailwind", "implementation"],
     }
 
-    scores = {domain: sum(1 for kw in keywords if kw in query_lower) for domain, keywords in domain_keywords.items()}
-    best = max(scores, key=scores
+    scores: dict[str, int] = {domain: sum(1 for kw in keywords if kw in query_lower) for domain, keywords in domain_keywords.items()}
+    best = max(scores, key=lambda k: scores[k])
     return best if scores[best] > 0 else "widget"
 
 
 # ============ MAIN SEARCH FUNCTIONS ============
-def search(query, domain=None, max_results=MAX_RESULTS):
+def search(query: str, domain: str | None = None, max_results: int = MAX_RESULTS) -> dict[str, Any]:
     """
     Main search function with auto-domain detection
 
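`detect_domain` counts keyword hits per domain and takes the arg-max, falling back to "widget" when nothing matches; the `lambda k: scores[k]` key is equivalent to the usual dict arg-max idiom but friendlier to strict type checking. A toy walk-through (the "widget" keyword list below is invented; only the "prompt" list appears in this diff):

```python
domain_keywords = {
    "widget": ["widget", "listview", "button"],   # illustrative keywords
    "prompt": ["prompt", "ai", "css", "tailwind", "implementation"],
}
query_lower = "listview button widget"

scores = {d: sum(1 for kw in kws if kw in query_lower) for d, kws in domain_keywords.items()}
best = max(scores, key=lambda k: scores[k])
print(scores, "->", best if scores[best] > 0 else "widget")
# {'widget': 3, 'prompt': 0} -> widget
```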
@@ -292,7 +294,7 @@ def search(query, domain=None, max_results=MAX_RESULTS):
         return {"error": f"Unknown domain: {domain}. Available: {', '.join(AVAILABLE_DOMAINS)}"}
 
     config = CSV_CONFIG[domain]
-    filepath = DATA_DIR / config["file"]
+    filepath = DATA_DIR / str(config["file"])
 
     if not filepath.exists():
         return {"error": f"File not found: {filepath}", "domain": domain}
@@ -301,15 +303,20 @@
     boost_col = "Widget Name" if domain == "widget" else None
     boost_query = query if domain == "widget" else None
 
-
-
-
-
-
-
-
-
-
+    search_cols = config["search_cols"]
+    output_cols = config["output_cols"]
+    if isinstance(search_cols, list) and isinstance(output_cols, list):
+        results = _search_csv(
+            filepath,
+            search_cols,
+            output_cols,
+            query,
+            max_results,
+            boost_col=boost_col,
+            boost_query=boost_query
+        )
+    else:
+        results = []
 
     return {
         "domain": domain,
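The new `isinstance(search_cols, list) and isinstance(output_cols, list)` guard exists to narrow the `str | list[str]` union for the type checker; with the shipped `CSV_CONFIG` both keys hold lists, so the `else: results = []` branch is effectively a type-level fallback. Based only on the keys visible in this diff (`"domain"`, `"results"`, and the per-row `"_score"` added by `_search_csv`), a call might look like this hedged sketch (column names in the output rows depend on each domain's `output_cols`, which are not fully shown here):

```python
from core import search  # assumes assets/scripts is on sys.path

response = search("scrollable list of items", domain="widget", max_results=3)
print(response["domain"])                              # "widget"
for row in response.get("results", []):
    print(row.get("Widget Name", "?"), row["_score"])  # "_score" is set by _search_csv
```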
@@ -320,7 +327,7 @@ def search(query, domain=None, max_results=MAX_RESULTS):
     }
 
 
-def search_with_stack(query, stack, domain=None, max_results=MAX_RESULTS):
+def search_with_stack(query: str, stack: str, domain: str | None = None, max_results: int = MAX_RESULTS) -> dict[str, Any]:
     """
     Search with stack-specific filtering (excludes conflicting packages)
 
@@ -343,11 +350,11 @@ def search_with_stack(query, stack, domain=None, max_results=MAX_RESULTS):
 
     # Filter out conflicting packages
     excluded = STACK_EXCLUSIONS[stack]
-    filtered_results = []
+    filtered_results: list[dict[str, Any]] = []
 
     for item in result["results"]:
         # Check package name field
-        pkg_name = item.get("pkg_name", "").lower()
+        pkg_name = str(item.get("pkg_name", "")).lower()
         if pkg_name not in excluded:
             filtered_results.append(item)
 
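`search_with_stack` runs the same search and then drops any row whose `pkg_name` appears in `STACK_EXCLUSIONS[stack]`; the new `str(...)` wrapper only ensures `.lower()` type-checks when the field is missing or non-string. A standalone sketch of the filter (the stack name and exclusion set below are hypothetical; the real contents of `STACK_EXCLUSIONS` are not part of this diff):

```python
STACK_EXCLUSIONS = {"riverpod": {"provider", "get_it"}}   # hypothetical exclusion data

results = [
    {"pkg_name": "Provider", "_score": 2.1},
    {"pkg_name": "riverpod_generator", "_score": 1.7},
]

excluded = STACK_EXCLUSIONS["riverpod"]
filtered_results = [
    item for item in results
    if str(item.get("pkg_name", "")).lower() not in excluded
]
print([item["pkg_name"] for item in filtered_results])
# ['riverpod_generator'] -- "Provider" is in the (hypothetical) exclusion set, so it is dropped
```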