taiwan-payment-skill 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +197 -0
  2. package/assets/taiwan-payment/CLAUDE.md +297 -0
  3. package/assets/taiwan-payment/EXAMPLES.md +1425 -0
  4. package/assets/taiwan-payment/README.md +306 -0
  5. package/assets/taiwan-payment/SKILL.md +857 -0
  6. package/assets/taiwan-payment/data/error-codes.csv +20 -0
  7. package/assets/taiwan-payment/data/field-mappings.csv +15 -0
  8. package/assets/taiwan-payment/data/operations.csv +8 -0
  9. package/assets/taiwan-payment/data/payment-methods.csv +24 -0
  10. package/assets/taiwan-payment/data/providers.csv +4 -0
  11. package/assets/taiwan-payment/data/reasoning.csv +32 -0
  12. package/assets/taiwan-payment/data/troubleshooting.csv +18 -0
  13. package/assets/taiwan-payment/references/ecpay-payment-api.md +880 -0
  14. package/assets/taiwan-payment/references/newebpay-payment-api.md +677 -0
  15. package/assets/taiwan-payment/references/payuni-payment-api.md +997 -0
  16. package/assets/taiwan-payment/scripts/core.py +288 -0
  17. package/assets/taiwan-payment/scripts/recommend.py +269 -0
  18. package/assets/taiwan-payment/scripts/search.py +185 -0
  19. package/assets/taiwan-payment/scripts/test_payment.py +358 -0
  20. package/assets/templates/base/quick-reference.md +370 -0
  21. package/assets/templates/base/skill-content.md +851 -0
  22. package/assets/templates/platforms/antigravity.json +25 -0
  23. package/assets/templates/platforms/claude.json +26 -0
  24. package/assets/templates/platforms/codebuddy.json +25 -0
  25. package/assets/templates/platforms/codex.json +25 -0
  26. package/assets/templates/platforms/continue.json +25 -0
  27. package/assets/templates/platforms/copilot.json +25 -0
  28. package/assets/templates/platforms/cursor.json +25 -0
  29. package/assets/templates/platforms/gemini.json +25 -0
  30. package/assets/templates/platforms/kiro.json +25 -0
  31. package/assets/templates/platforms/opencode.json +25 -0
  32. package/assets/templates/platforms/qoder.json +25 -0
  33. package/assets/templates/platforms/roocode.json +25 -0
  34. package/assets/templates/platforms/trae.json +25 -0
  35. package/assets/templates/platforms/windsurf.json +25 -0
  36. package/dist/index.js +17095 -0
  37. package/package.json +58 -0
@@ -0,0 +1,288 @@
+ #!/usr/bin/env python3
+ """
+ Taiwan Payment BM25 search engine
+
+ A semantic search system based on the BM25 (Okapi BM25) algorithm.
+ Supported search domains: provider, operation, error, field, payment_method, troubleshoot, reasoning
+
+ Usage:
+     from core import search, search_all, detect_domain
+
+     # Single-domain search
+     results = search("信用卡", domain="payment_method", max_results=5)
+
+     # Auto-detect the domain
+     results = search("ECPay API", domain=None, max_results=3)
+
+     # Search across all domains
+     all_results = search_all("金額錯誤", max_per_domain=3)
+ """
+
+ import csv
+ import math
+ import re
+ from pathlib import Path
+ from typing import List, Dict, Optional, Tuple
+ import json
+
+ # Data file paths
+ SCRIPT_DIR = Path(__file__).parent
+ DATA_DIR = SCRIPT_DIR.parent / 'data'
+
+ # CSV configuration
+ CSV_CONFIG = {
+     'provider': {
+         'file': 'providers.csv',
+         'search_cols': ['provider', 'display_name', 'auth_method', 'features', 'api_style'],
+         'output_cols': ['provider', 'display_name', 'auth_method', 'encryption', 'test_merchant_id', 'features', 'market_share', 'api_style']
+     },
+     'operation': {
+         'file': 'operations.csv',
+         'search_cols': ['operation', 'name_zh', 'name_en', 'description'],
+         'output_cols': ['operation', 'name_zh', 'ecpay_endpoint', 'newebpay_endpoint', 'payuni_endpoint', 'required_fields', 'description']
+     },
+     'error': {
+         'file': 'error-codes.csv',
+         'search_cols': ['provider', 'code', 'message_zh', 'message_en', 'category', 'solution'],
+         'output_cols': ['provider', 'code', 'message_zh', 'category', 'severity', 'solution']
+     },
+     'field': {
+         'file': 'field-mappings.csv',
+         'search_cols': ['field_name', 'field_zh', 'ecpay_name', 'newebpay_name', 'payuni_name', 'notes'],
+         'output_cols': ['field_name', 'field_zh', 'ecpay_name', 'newebpay_name', 'payuni_name', 'type', 'required', 'format', 'notes']
+     },
+     'payment_method': {
+         'file': 'payment-methods.csv',
+         'search_cols': ['method_id', 'name_zh', 'name_en', 'ecpay_code', 'newebpay_code', 'payuni_code', 'description', 'features'],
+         'output_cols': ['method_id', 'name_zh', 'ecpay_code', 'newebpay_code', 'payuni_code', 'category', 'description', 'features']
+     },
+     'troubleshoot': {
+         'file': 'troubleshooting.csv',
+         'search_cols': ['issue', 'symptom', 'cause', 'solution', 'provider'],
+         'output_cols': ['issue', 'symptom', 'cause', 'solution', 'provider', 'severity']
+     },
+     'reasoning': {
+         'file': 'reasoning.csv',
+         'search_cols': ['scenario', 'recommended_provider', 'reason', 'use_cases', 'anti_patterns'],
+         'output_cols': ['scenario', 'recommended_provider', 'confidence', 'reason', 'anti_patterns', 'use_cases']
+     }
+ }
+
+ # Domain-detection keywords (matched against the lowercased query, so all entries are lowercase)
+ DOMAIN_KEYWORDS = {
+     'provider': ['ecpay', '綠界', 'newebpay', '藍新', 'payuni', '統一', '金流', '服務商', 'provider'],
+     'operation': ['create', 'query', 'refund', 'void', '建立', '查詢', '退款', '作廢', '請款', 'api', 'endpoint'],
+     'error': ['error', 'code', '錯誤', '失敗', 'failed', '10100', '10200', 'tra'],
+     'field': ['field', 'parameter', '參數', '欄位', 'merchantid', 'tradeno', 'amount'],
+     'payment_method': ['credit', 'atm', 'cvs', 'barcode', '信用卡', '轉帳', '超商', '支付', '付款方式', 'payment'],
+     'troubleshoot': ['troubleshoot', 'issue', 'problem', '問題', '疑難', '排解', '如何', 'how to'],
+     'reasoning': ['recommend', 'choose', 'select', '推薦', '選擇', '建議', '適合', '比較', 'why', '為什麼']
+ }
+
+
+ def tokenize(text: str) -> List[str]:
+     """Tokenize mixed Chinese and English text"""
+     if not text:
+         return []
+
+     # Lowercase
+     text = text.lower()
+
+     # Handle Chinese and English separately
+     tokens = []
+
+     # English tokens (including digits)
+     for match in re.finditer(r'[a-z0-9]+', text):
+         tokens.append(match.group())
+
+     # Chinese unigrams + bigrams
+     chinese_chars = re.findall(r'[\u4e00-\u9fff]', text)
+     for char in chinese_chars:
+         tokens.append(char)
+
+     # Bigrams for better matching
+     for i in range(len(chinese_chars) - 1):
+         tokens.append(chinese_chars[i] + chinese_chars[i + 1])
+
+     return tokens
+
+
+ def compute_idf(documents: List[List[str]]) -> Dict[str, float]:
+     """Compute IDF (Inverse Document Frequency)"""
+     n = len(documents)
+     df = {}
+
+     for doc in documents:
+         seen = set()
+         for term in doc:
+             if term not in seen:
+                 df[term] = df.get(term, 0) + 1
+                 seen.add(term)
+
+     idf = {}
+     for term, freq in df.items():
+         idf[term] = math.log((n - freq + 0.5) / (freq + 0.5) + 1.0)
+
+     return idf
+
+
+ def bm25_score(
+     query_tokens: List[str],
+     doc_tokens: List[str],
+     idf: Dict[str, float],
+     avg_dl: float,
+     k1: float = 1.5,
+     b: float = 0.75
+ ) -> float:
+     """Compute the BM25 score"""
+     score = 0.0
+     dl = len(doc_tokens)
+
+     # Term frequency in document
+     tf = {}
+     for term in doc_tokens:
+         tf[term] = tf.get(term, 0) + 1
+
+     for term in query_tokens:
+         if term in tf:
+             freq = tf[term]
+             idf_score = idf.get(term, 0)
+             numerator = freq * (k1 + 1)
+             denominator = freq + k1 * (1 - b + b * (dl / avg_dl))
+             score += idf_score * (numerator / denominator)
+
+     return score
+
+
+ def load_csv(domain: str) -> Tuple[List[Dict], List[List[str]]]:
+     """Load a CSV file and return its rows plus the tokenized documents"""
+     config = CSV_CONFIG.get(domain)
+     if not config:
+         return [], []
+
+     csv_path = DATA_DIR / config['file']
+     if not csv_path.exists():
+         return [], []
+
+     rows = []
+     documents = []
+
+     with open(csv_path, 'r', encoding='utf-8') as f:
+         reader = csv.DictReader(f)
+         for row in reader:
+             rows.append(row)
+
+             # Concatenate the searchable columns
+             search_text = ' '.join(
+                 str(row.get(col, '')) for col in config['search_cols']
+             )
+             documents.append(tokenize(search_text))
+
+     return rows, documents
+
+
+ def detect_domain(query: str) -> str:
+     """Auto-detect which domain a query belongs to"""
+     query_lower = query.lower()
+     scores = {}
+
+     for domain, keywords in DOMAIN_KEYWORDS.items():
+         score = sum(1 for kw in keywords if kw in query_lower)
+         scores[domain] = score
+
+     # Return the highest-scoring domain; fall back to 'provider' if every score is 0
+     max_score = max(scores.values())
+     if max_score == 0:
+         return 'provider'
+
+     return max(scores, key=scores.get)
+
+
+ def search(
+     query: str,
+     domain: Optional[str] = None,
+     max_results: int = 5
+ ) -> List[Dict]:
+     """
+     Main search function
+
+     Args:
+         query: search query
+         domain: search domain (None means auto-detect)
+         max_results: maximum number of results
+
+     Returns:
+         List of result dicts (sorted by score)
+     """
+     if not query:
+         return []
+
+     # Auto-detect the domain
+     if domain is None:
+         domain = detect_domain(query)
+
+     # Load the data
+     rows, documents = load_csv(domain)
+     if not rows:
+         return []
+
+     # Compute IDF
+     idf = compute_idf(documents)
+     avg_dl = sum(len(doc) for doc in documents) / len(documents)
+
+     # Query tokens
+     query_tokens = tokenize(query)
+
+     # Score every document
+     scores = []
+     for i, doc_tokens in enumerate(documents):
+         score = bm25_score(query_tokens, doc_tokens, idf, avg_dl)
+         if score > 0:
+             scores.append((score, i))
+
+     # Sort and return the results
+     scores.sort(reverse=True)
+
+     config = CSV_CONFIG[domain]
+     results = []
+
+     for score, idx in scores[:max_results]:
+         row = rows[idx]
+         result = {col: row.get(col, '') for col in config['output_cols']}
+         result['_score'] = round(score, 2)
+         result['_domain'] = domain
+         results.append(result)
+
+     return results
+
+
+ def search_all(query: str, max_per_domain: int = 3) -> Dict[str, List]:
+     """Global search (search every domain)"""
+     all_results = {}
+
+     for domain in CSV_CONFIG.keys():
+         results = search(query, domain=domain, max_results=max_per_domain)
+         if results:
+             all_results[domain] = results
+
+     return all_results
+
+
+ if __name__ == '__main__':
+     # Quick command-line test
+     import sys
+
+     if len(sys.argv) < 2:
+         print("用法: python core.py <query> [domain]")
+         print(f"可用域: {', '.join(CSV_CONFIG.keys())}")
+         sys.exit(1)
+
+     query = sys.argv[1]
+     domain = sys.argv[2] if len(sys.argv) > 2 else None
+
+     if domain == 'all':
+         results = search_all(query)
+         print(json.dumps(results, ensure_ascii=False, indent=2))
+     else:
+         results = search(query, domain=domain)
+         print(json.dumps(results, ensure_ascii=False, indent=2))
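A minimal sketch (not part of the package) of how the functions in scripts/core.py above compose end to end: tokenize the corpus and the query, compute corpus-level IDF, then rank with bm25_score using the default k1=1.5 and b=0.75. It assumes core.py is importable from the working directory and needs none of the data/ CSVs.

# Rank two toy documents against a Chinese query with core.py's functions.
from core import tokenize, compute_idf, bm25_score

docs_text = ["信用卡 分期 付款", "ATM 虛擬帳號 轉帳"]
docs = [tokenize(t) for t in docs_text]            # mixed Chinese/English tokens
idf = compute_idf(docs)                            # IDF over the small corpus
avg_dl = sum(len(d) for d in docs) / len(docs)     # average document length

query = tokenize("信用卡")
ranked = sorted(
    ((bm25_score(query, d, idf, avg_dl), text) for d, text in zip(docs, docs_text)),
    reverse=True,
)
print(ranked)  # the credit-card document should come first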
@@ -0,0 +1,269 @@
+ #!/usr/bin/env python3
+ """
+ Taiwan Payment recommendation system
+
+ A recommendation engine based on weighted keyword scoring.
+ Recommends the most suitable payment provider for a given requirement scenario.
+
+ Usage:
+     python recommend.py "高交易量電商"
+     python recommend.py "快速整合 LINE Pay" --format json
+     python recommend.py "新創公司 API" --format simple
+ """
+
+ from typing import List, Dict, Tuple, Optional
+ import argparse
+ import csv
+ from pathlib import Path
+ import json
+
+ # Path settings
+ SCRIPT_DIR = Path(__file__).parent
+ DATA_DIR = SCRIPT_DIR.parent / 'data'
+
+ # Recommendation rules (keyword -> [(provider, weight, reason)])
+ RECOMMENDATION_RULES = {
+     '穩定': [('ecpay', 3, '市佔率最高,穩定性最佳')],
+     '高交易': [('ecpay', 3, '適合高交易量場景')],
+     '快速': [('ecpay', 2, '文檔完整,整合快速')],
+     '簡單': [('ecpay', 2, '範例豐富,容易上手')],
+     '整合': [('ecpay', 2, '社群資源豐富')],
+     '多元': [('newebpay', 3, '支援最多支付方式')],
+     '支付方式': [('newebpay', 3, '13 種付款方式')],
+     '電子錢包': [('newebpay', 3, 'LINE Pay / Apple Pay / Google Pay')],
+     'line': [('newebpay', 3, '原生支援 LINE Pay')],
+     '行動': [('newebpay', 3, '行動支付完整')],
+     '記憶': [('newebpay', 3, '信用卡記憶功能')],
+     '會員': [('newebpay', 2, '適合會員制電商')],
+     'api': [('payuni', 3, 'RESTful JSON API')],
+     'json': [('payuni', 3, 'JSON 格式友好')],
+     'restful': [('payuni', 3, 'RESTful 設計')],
+     '統一': [('payuni', 2, '統一集團背景')],
+     '新創': [('payuni', 2, 'API 設計優先')],
+     'atm': [('ecpay', 2, 'ATM 虛擬帳號')],
+     '超商': [('ecpay', 2, '四大超商支援')],
+     '定期': [('ecpay', 3, '定期定額扣款')],
+     '訂閱': [('ecpay', 3, '訂閱制服務')],
+     '分期': [('ecpay', 3, '信用卡分期')],
+     'bnpl': [('ecpay', 2, '先買後付')],
+     '測試': [('ecpay', 2, '測試帳號完整')],
+     'php': [('ecpay', 2, 'PHP SDK 完整')],
+     'node': [('payuni', 2, 'JSON API 友好')],
+     'python': [('ecpay', 2, 'Python 範例完整')],
+     'app': [('newebpay', 3, '行動支付完整')],
+     '跨境': [('newebpay', 2, '支援國際卡')],
+     '發票': [('ecpay', 2, '同時支援金流發票')],
+     '物流': [('ecpay', 2, '同時支援金流物流')],
+ }
+
+ # Anti-patterns (scenarios where a provider is not recommended)
+ ANTI_PATTERNS = {
+     'ecpay': [
+         ('無技術資源', 'SHA256 加密流程較複雜,建議有技術人員'),
+         ('極簡需求', '若只需基礎支付,可能功能過多'),
+     ],
+     'newebpay': [
+         ('簡單 API', 'AES 雙層加密較複雜'),
+         ('單一支付', '若只需單一支付方式,不需選擇此平台'),
+     ],
+     'payuni': [
+         ('大型專案', '社群資源較少,大型專案建議選 ECPay'),
+         ('完整文檔', '文檔完整度不如 ECPay'),
+     ]
+ }
+
+
+ def load_reasoning_csv() -> List[Dict]:
+     """Load recommendation rules from reasoning.csv"""
+     csv_path = DATA_DIR / 'reasoning.csv'
+     if not csv_path.exists():
+         return []
+
+     rules = []
+     with open(csv_path, 'r', encoding='utf-8') as f:
+         reader = csv.DictReader(f)
+         for row in reader:
+             rules.append(row)
+
+     return rules
+
+
+ def analyze_requirements(query: str) -> Dict[str, Tuple[int, List[str]]]:
+     """
+     Analyze the requirements and compute a recommendation score per provider
+
+     Returns:
+         {provider: (score, [reasons])}
+     """
+     query_lower = query.lower()
+     scores = {'ecpay': 0, 'newebpay': 0, 'payuni': 0}
+     reasons = {'ecpay': [], 'newebpay': [], 'payuni': []}
+
+     # Score against the keyword rules
+     for keyword, recommendations in RECOMMENDATION_RULES.items():
+         if keyword in query_lower:
+             for provider, weight, reason in recommendations:
+                 scores[provider] += weight
+                 reasons[provider].append(f'✓ {reason} (+{weight})')
+
+     # Load additional rules from the CSV
+     csv_rules = load_reasoning_csv()
+     for rule in csv_rules:
+         scenario = rule.get('scenario', '').lower()
+         if any(word in scenario for word in query_lower.split()):
+             provider = rule.get('recommended_provider', '').lower()
+             if provider in scores:
+                 confidence = rule.get('confidence', 'MEDIUM')
+                 weight_map = {'HIGH': 3, 'MEDIUM': 2, 'LOW': 1}
+                 weight = weight_map.get(confidence, 1)
+                 scores[provider] += weight
+
+                 reason_text = rule.get('reason', '')
+                 if reason_text:
+                     reasons[provider].append(f'✓ {reason_text} (+{weight})')
+
+     return {p: (s, reasons[p]) for p, s in scores.items()}
+
+
+ def get_anti_patterns(provider: str) -> List[str]:
+     """Get anti-pattern warnings for a provider"""
+     return [f'⚠ {pattern}: {desc}' for pattern, desc in ANTI_PATTERNS.get(provider, [])]
+
+
+ def format_recommendation_ascii(results: Dict[str, Tuple[int, List[str]]], query: str) -> str:
+     """Format output (ASCII box)"""
+     # Sort by score
+     sorted_results = sorted(results.items(), key=lambda x: x[1][0], reverse=True)
+
+     output = []
+     output.append('╔' + '═' * 78 + '╗')
+     output.append('║' + f' 台灣金流推薦系統 - 分析結果'.center(76) + '║')
+     output.append('╠' + '═' * 78 + '╣')
+     output.append('║' + f' 查詢: {query}'.ljust(77) + '║')
+     output.append('╚' + '═' * 78 + '╝')
+     output.append('')
+
+     for rank, (provider, (score, reason_list)) in enumerate(sorted_results, 1):
+         if score == 0:
+             continue
+
+         # Provider display names
+         provider_names = {
+             'ecpay': '綠界科技 ECPay',
+             'newebpay': '藍新金流 NewebPay',
+             'payuni': '統一金流 PAYUNi'
+         }
+         display_name = provider_names.get(provider, provider)
+
+         # Rank emoji
+         emoji = '🥇' if rank == 1 else '🥈' if rank == 2 else '🥉'
+
+         output.append(f'{emoji} 推薦 #{rank}: {display_name}')
+         output.append(f' 評分: {score} 分')
+         output.append('')
+
+         if reason_list:
+             output.append(' 推薦理由:')
+             for reason in reason_list:
+                 output.append(f' {reason}')
+             output.append('')
+
+         # Anti-pattern warnings
+         anti = get_anti_patterns(provider)
+         if anti:
+             output.append(' 注意事項:')
+             for warning in anti:
+                 output.append(f' {warning}')
+             output.append('')
+
+         output.append('─' * 80)
+
+     return '\n'.join(output)
+
+
+ def format_recommendation_json(results: Dict[str, Tuple[int, List[str]]], query: str) -> str:
+     """Format output (JSON)"""
+     sorted_results = sorted(results.items(), key=lambda x: x[1][0], reverse=True)
+
+     output_data = {
+         'query': query,
+         'recommendations': []
+     }
+
+     for rank, (provider, (score, reason_list)) in enumerate(sorted_results, 1):
+         if score == 0:
+             continue
+
+         provider_names = {
+             'ecpay': '綠界科技 ECPay',
+             'newebpay': '藍新金流 NewebPay',
+             'payuni': '統一金流 PAYUNi'
+         }
+
+         rec = {
+             'rank': rank,
+             'provider': provider,
+             'display_name': provider_names.get(provider, provider),
+             'score': score,
+             'reasons': [r.replace('✓ ', '').split(' (+')[0] for r in reason_list],
+             'anti_patterns': [a.replace('⚠ ', '') for a in get_anti_patterns(provider)]
+         }
+         output_data['recommendations'].append(rec)
+
+     return json.dumps(output_data, ensure_ascii=False, indent=2)
+
+
+ def format_recommendation_simple(results: Dict[str, Tuple[int, List[str]]], query: str) -> str:
+     """Format output (simple text)"""
+     sorted_results = sorted(results.items(), key=lambda x: x[1][0], reverse=True)
+
+     output = [f'查詢: {query}\n']
+
+     for rank, (provider, (score, reason_list)) in enumerate(sorted_results, 1):
+         if score == 0:
+             continue
+
+         provider_names = {
+             'ecpay': '綠界科技 ECPay',
+             'newebpay': '藍新金流 NewebPay',
+             'payuni': '統一金流 PAYUNi'
+         }
+
+         output.append(f'推薦 #{rank}: {provider_names.get(provider, provider)} ({score} 分)')
+
+         if reason_list:
+             for reason in reason_list:
+                 output.append(f' - {reason.replace("✓ ", "")}')
+
+         anti = get_anti_patterns(provider)
+         if anti:
+             for warning in anti:
+                 output.append(f' ! {warning.replace("⚠ ", "")}')
+
+         output.append('')
+
+     return '\n'.join(output)
+
+
+ def main():
+     parser = argparse.ArgumentParser(description='台灣金流推薦系統')
+     parser.add_argument('query', type=str, help='需求描述')
+     parser.add_argument('--format', choices=['ascii', 'json', 'simple'], default='ascii', help='輸出格式')
+
+     args = parser.parse_args()
+
+     # Analyze the requirements
+     results = analyze_requirements(args.query)
+
+     # Format the output
+     if args.format == 'json':
+         print(format_recommendation_json(results, args.query))
+     elif args.format == 'simple':
+         print(format_recommendation_simple(results, args.query))
+     else:
+         print(format_recommendation_ascii(results, args.query))
+
+
+ if __name__ == '__main__':
+     main()
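As a usage note, and only as a sketch against the code above rather than anything shipped with the package, the scorer in scripts/recommend.py can also be called directly instead of through its CLI; the return shape follows the analyze_requirements() docstring, {provider: (score, [reasons])}. It assumes recommend.py is importable from the working directory; the reasoning.csv lookup silently falls back to the keyword rules if the data file is absent.

# Score a requirement description and print the plain-text report.
from recommend import analyze_requirements, format_recommendation_simple

query = "訂閱制 LINE Pay App"                      # hits the '訂閱', 'line', and 'app' rules
results = analyze_requirements(query)              # {provider: (score, [reasons])}
print(format_recommendation_simple(results, query))

# Equivalent CLI calls, as documented in the module docstring:
#   python recommend.py "高交易量電商"
#   python recommend.py "快速整合 LINE Pay" --format json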