quant-llm-wiki 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quant_llm_wiki-0.2.0/LICENSE +21 -0
- quant_llm_wiki-0.2.0/PKG-INFO +544 -0
- quant_llm_wiki-0.2.0/README.md +507 -0
- quant_llm_wiki-0.2.0/pyproject.toml +60 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/__init__.py +0 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/agent/__init__.py +3 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/agent/cli.py +133 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/agent/graph.py +92 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/agent/prompts.py +48 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/agent/tools.py +714 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/cli.py +29 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/embed.py +359 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/enrich.py +648 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/ingest/__init__.py +0 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/ingest/wechat.py +501 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/query/__init__.py +0 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/query/brainstorm.py +833 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/query/rethink.py +459 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/shared.py +567 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki/sync.py +170 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki.egg-info/PKG-INFO +544 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki.egg-info/SOURCES.txt +53 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki.egg-info/dependency_links.txt +1 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki.egg-info/entry_points.txt +2 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki.egg-info/requires.txt +8 -0
- quant_llm_wiki-0.2.0/quant_llm_wiki.egg-info/top_level.txt +1 -0
- quant_llm_wiki-0.2.0/setup.cfg +4 -0
- quant_llm_wiki-0.2.0/tests/test_agent_graph.py +74 -0
- quant_llm_wiki-0.2.0/tests/test_agent_sanitize.py +175 -0
- quant_llm_wiki-0.2.0/tests/test_agent_tools.py +360 -0
- quant_llm_wiki-0.2.0/tests/test_brainstorm.py +141 -0
- quant_llm_wiki-0.2.0/tests/test_brainstorm_with_wiki.py +120 -0
- quant_llm_wiki-0.2.0/tests/test_code_math.py +46 -0
- quant_llm_wiki-0.2.0/tests/test_embed.py +105 -0
- quant_llm_wiki-0.2.0/tests/test_enrich.py +159 -0
- quant_llm_wiki-0.2.0/tests/test_enrich_timeout.py +202 -0
- quant_llm_wiki-0.2.0/tests/test_ingest_source.py +68 -0
- quant_llm_wiki-0.2.0/tests/test_ingest_timeout.py +102 -0
- quant_llm_wiki-0.2.0/tests/test_ingest_wechat.py +232 -0
- quant_llm_wiki-0.2.0/tests/test_kb_cli.py +53 -0
- quant_llm_wiki-0.2.0/tests/test_pdf_extract.py +62 -0
- quant_llm_wiki-0.2.0/tests/test_query_wiki_first_ask.py +54 -0
- quant_llm_wiki-0.2.0/tests/test_rethink.py +344 -0
- quant_llm_wiki-0.2.0/tests/test_shared_surrogate_sanitize.py +79 -0
- quant_llm_wiki-0.2.0/tests/test_sync_articles_by_status.py +65 -0
- quant_llm_wiki-0.2.0/tests/test_web_extract.py +49 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_compile.py +284 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_index.py +53 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_lint.py +158 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_lint_schema.py +82 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_maintain.py +114 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_paths.py +22 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_schemas.py +184 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_seed.py +45 -0
- quant_llm_wiki-0.2.0/tests/test_wiki_state.py +153 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 jackwu321
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,544 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: quant_llm_wiki
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: AI-powered quant research knowledge base & brainstorm agent
|
|
5
|
+
Author: jackwu321
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/jackwu321/Quant_LLM_Wiki
|
|
8
|
+
Project-URL: Repository, https://github.com/jackwu321/Quant_LLM_Wiki
|
|
9
|
+
Project-URL: Issues, https://github.com/jackwu321/Quant_LLM_Wiki/issues
|
|
10
|
+
Project-URL: Changelog, https://github.com/jackwu321/Quant_LLM_Wiki/releases
|
|
11
|
+
Keywords: quant,knowledge-base,wiki,llm,rag,langgraph,chromadb,research,brainstorm
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Intended Audience :: Financial and Insurance Industry
|
|
15
|
+
Classifier: Intended Audience :: Science/Research
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Classifier: Programming Language :: Python :: 3
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
22
|
+
Classifier: Topic :: Office/Business :: Financial :: Investment
|
|
23
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
24
|
+
Classifier: Topic :: Text Processing :: Markup :: Markdown
|
|
25
|
+
Requires-Python: >=3.10
|
|
26
|
+
Description-Content-Type: text/markdown
|
|
27
|
+
License-File: LICENSE
|
|
28
|
+
Requires-Dist: requests>=2.28.0
|
|
29
|
+
Requires-Dist: beautifulsoup4>=4.12.0
|
|
30
|
+
Requires-Dist: chromadb>=0.4.0
|
|
31
|
+
Requires-Dist: langgraph>=0.2.0
|
|
32
|
+
Requires-Dist: langchain-core>=0.3.0
|
|
33
|
+
Requires-Dist: langchain-community>=0.3.0
|
|
34
|
+
Requires-Dist: langchain-openai>=0.3.0
|
|
35
|
+
Requires-Dist: python-dotenv>=1.0.0
|
|
36
|
+
Dynamic: license-file
|
|
37
|
+
|
|
38
|
+
# Quant_LLM_Wiki: A Karpathy-shaped wiki-first knowledge base for quant research
|
|
39
|
+
|
|
40
|
+
<p align="center">
|
|
41
|
+
<a href="#features">Features</a> |
|
|
42
|
+
<a href="#architecture">Architecture</a> |
|
|
43
|
+
<a href="#quick-start">Quick Start</a> |
|
|
44
|
+
<a href="#agent-usage">Agent Usage</a> |
|
|
45
|
+
<a href="#configuration">Configuration</a> |
|
|
46
|
+
<a href="#running-tests">Tests</a> |
|
|
47
|
+
<a href="#contributing">Contributing</a>
|
|
48
|
+
</p>
|
|
49
|
+
|
|
50
|
+
<p align="center">
|
|
51
|
+
<img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="Python">
|
|
52
|
+
<img src="https://img.shields.io/badge/license-MIT-green.svg" alt="License">
|
|
53
|
+
<img src="https://img.shields.io/badge/LLM-OpenAI_Compatible-orange.svg" alt="LLM">
|
|
54
|
+
<img src="https://img.shields.io/badge/vector_store-ChromaDB-purple.svg" alt="ChromaDB">
|
|
55
|
+
</p>
|
|
56
|
+
|
|
57
|
+
---
|
|
58
|
+
|
|
59
|
+
**Quant_LLM_Wiki** turns WeChat articles, web pages, and research PDFs into an LLM-built Markdown knowledge base for quantitative investment research. It follows Andrej Karpathy's [LLM-built KB method](https://karpathy.bearblog.dev/): a `raw/` ingest layer, an LLM-compiled `wiki/` of concept articles, and a `schema/` that the LLM and tools both follow. Vector RAG is preserved as a fallback substrate, **not** the primary retrieval path. Three durable verbs — `ingest`, `query`, `lint` — drive everything. A built-in **Rethink Layer** scores novelty and quality of brainstormed ideas before output.
|
|
60
|
+
|
|
61
|
+
> The goal is **research inspiration and cross-document idea combination**, not producing trade-ready strategies.
|
|
62
|
+
|
|
63
|
+
## Features
|
|
64
|
+
|
|
65
|
+
- **Multi-source Ingestion** — Ingest from single URLs, batch URL lists, or local HTML files; warns on re-ingesting previously rejected sources
|
|
66
|
+
- **LLM Enrichment** — Automatically extract structured fields: idea blocks, transfer targets, combination hooks, failure modes, and more. Concurrent processing with configurable parallelism
|
|
67
|
+
- **Hybrid RAG Retrieval** — Keyword + vector + RRF fusion retrieval across your knowledge base
|
|
68
|
+
- **Brainstorm Mode** — Generate new strategy ideas by combining insights from multiple articles
|
|
69
|
+
- **Rethink Layer** — Post-generation validation that checks idea novelty (via vector similarity) and scores quality (traceability, coherence, actionability)
|
|
70
|
+
- **Article Quality Control** — Mark articles as `rejected` to remove from KB and prevent re-ingestion; review tool shows only enriched articles
|
|
71
|
+
- **Interactive Agent** — LangGraph ReAct agent with 8 tools for full pipeline management, with real-time progress streaming
|
|
72
|
+
- **Provider-Agnostic** — Works with any OpenAI-compatible LLM API (Zhipu GLM, DeepSeek, Moonshot, Qwen, OpenAI, Ollama, etc.)
|
|
73
|
+
- **Local-First** — All data stored locally as Markdown files + ChromaDB vectors
|
|
74
|
+
|
|
75
|
+
## Architecture
|
|
76
|
+
|
|
77
|
+
The system has three durable layers and three operational verbs. Vector RAG is preserved as supporting substrate, not the primary retrieval path.
|
|
78
|
+
|
|
79
|
+
### Layout
|
|
80
|
+
|
|
81
|
+
```
|
|
82
|
+
raw/ — incoming source articles (one dir per article: article.md + source.json + images/)
|
|
83
|
+
wiki/ — LLM-built Markdown memory (the primary query surface)
|
|
84
|
+
├── INDEX.md — auto-maintained table of contents
|
|
85
|
+
├── state.json — content hashes, concept scores, retrieval hints
|
|
86
|
+
├── lint_report.json — last health audit
|
|
87
|
+
├── concepts/<slug>.md
|
|
88
|
+
├── sources/<basename>.md
|
|
89
|
+
├── queries/<date>_<slug>_<mode>.md — query → wiki feedback log
|
|
90
|
+
└── maintenance_report.md — last `kb lint --maintain` output
|
|
91
|
+
schema/ — rules the LLM and tools follow:
|
|
92
|
+
concept-schema.md, source-schema.md, wiki-structure.md, operations.md
|
|
93
|
+
vector_store/ — ChromaDB substrate, used as fallback only
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
Articles live **flat** under `raw/`. The frontmatter `status` field (`raw`, `reviewed`, `high_value`, `rejected`) is the source of truth — there is no directory-as-status convention.
|
|
97
|
+
|
|
98
|
+
### Three operations
|
|
99
|
+
|
|
100
|
+
```
|
|
101
|
+
┌──> wiki/concepts/<slug>.md
|
|
102
|
+
├──> wiki/sources/<basename>.md
|
|
103
|
+
WeChat URL / Web URL / PDF / HTML ├──> wiki/INDEX.md
|
|
104
|
+
| ├──> wiki/state.json
|
|
105
|
+
v │ (hashes, scores, freshness, retrieval hints)
|
|
106
|
+
[kb ingest] ──> raw/<dir>/article.md + source.json
|
|
107
|
+
| ▲
|
|
108
|
+
v │
|
|
109
|
+
[kb compile] ── schema/-injected LLM ──────┘
|
|
110
|
+
(auto after ingest)
|
|
111
|
+
|
|
|
112
|
+
v
|
|
113
|
+
[kb embed] ── ChromaDB substrate over raw/ + wiki/
|
|
114
|
+
(auto after compile)
|
|
115
|
+
|
|
|
116
|
+
v
|
|
117
|
+
[kb query] ── wiki-first retrieval (INDEX → matched concepts → source summaries)
|
|
118
|
+
| RAG runs ONLY when wiki has no relevant concept or audit reports degradation
|
|
119
|
+
| (mode: ask | brainstorm; brainstorm runs Rethink Layer post-generation)
|
|
120
|
+
|
|
|
121
|
+
v
|
|
122
|
+
┌─ outputs/brainstorms/<date>_<slug>_<mode>.md
|
|
123
|
+
└─ wiki/queries/<date>_<slug>_<mode>.md ── append_query_log:
|
|
124
|
+
cited concepts get importance bump
|
|
125
|
+
+ retrieval_hints append in state.json
|
|
126
|
+
|
|
127
|
+
[kb lint] ── schema-compliance audit (frontmatter, sections, source anchors)
|
|
128
|
+
[kb lint --fix] ── LLM auto-repair of schema-noncompliant concepts
|
|
129
|
+
[kb lint --maintain] ── gap analysis: unmapped source clusters, under-supported concepts,
|
|
130
|
+
stale concepts → suggested ingestion queries / new brainstorm prompts
|
|
131
|
+
(writes wiki/maintenance_report.md)
|
|
132
|
+
[kb lint --maintain --apply] ── apply query-derived state updates idempotently
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
### Wiki-first retrieval (load-bearing invariant)
|
|
136
|
+
|
|
137
|
+
`brainstorm_from_kb.retrieve_blocks` gates on `_should_use_wiki_memory(notes) and _wiki_is_healthy_for_query(kb_root)`. There is **no** `command == "brainstorm"` check — both `ask` and `brainstorm` pull `kb_layer=wiki_concept` blocks first (Chroma-filtered → state-score reranked → lexical fallback), then fill remaining slots with complementary article chunks excluding sources already cited by the surfaced concepts. Pure-vector retrieval is the fallback, not the default.
|
|
138
|
+
|
|
139
|
+
### Query → wiki feedback
|
|
140
|
+
|
|
141
|
+
Every `kb query` (unless `--no-file-back`) files a structured note into `wiki/queries/<date>_<slug>_<mode>.md` and bumps `state.json:concepts.<slug>.importance` + `retrieval_hints` for cited concepts. `kb lint --maintain` later distills these query logs into proposed concept-page improvements. This realizes Karpathy's *"my own explorations and queries always 'add up' in the knowledge base."*
|
|
142
|
+
|
|
143
|
+
### Schema is enforced, not advisory
|
|
144
|
+
|
|
145
|
+
`schema/concept-schema.md` and `schema/source-schema.md` define required frontmatter fields, valid enum values, and required section headers. `wiki_lint` checks these on every run (severity: warning), and `kb lint --fix` runs an LLM auto-repair pass via `recompile_concept` for schema-noncompliant concepts. The schema text is also injected into compile-time prompts so the LLM is told the source-anchor invariant.
|
|
146
|
+
|
|
147
|
+
### Rethink Layer
|
|
148
|
+
|
|
149
|
+
A post-generation validation layer that runs automatically in brainstorm mode:
|
|
150
|
+
|
|
151
|
+
1. **Idea Parsing** — Extracts structured ideas from LLM output (EN/CN formats)
|
|
152
|
+
2. **Novelty Check** — Embeds each idea and queries ChromaDB for similar existing articles (threshold: 0.75)
|
|
153
|
+
3. **Quality Scoring** — Traceability (heuristic) + Coherence & Actionability (LLM-as-judge)
|
|
154
|
+
4. **Rethink Report** — Appended to output with per-idea scores and reasoning
|
|
155
|
+
|
|
156
|
+
### Agent Layer
|
|
157
|
+
|
|
158
|
+
The LangGraph ReAct agent provides 12 tools:
|
|
159
|
+
|
|
160
|
+
| Tool | Description |
|
|
161
|
+
|------|-------------|
|
|
162
|
+
| `ingest_article` | Ingest from URL (auto: WeChat / web / PDF), batch URLs, HTML file, PDF file, PDF URL |
|
|
163
|
+
| `enrich_articles` | LLM-powered structured enrichment (concurrent, with `limit` support) |
|
|
164
|
+
| `list_articles` | List articles by status (raw / reviewed / high_value); all live flat under `raw/` |
|
|
165
|
+
| `review_articles` | Show enriched articles ready for review |
|
|
166
|
+
| `set_article_status` | Update article status field in frontmatter |
|
|
167
|
+
| `embed_knowledge` | Build/update ChromaDB vector index over `raw/` + `wiki/` |
|
|
168
|
+
| `query_knowledge_base` | Wiki-first Q&A or brainstorm; both modes pull stable wiki concepts before vectors |
|
|
169
|
+
| `compile_wiki` | Compile/update wiki (incremental or rebuild); auto-runs lint |
|
|
170
|
+
| `audit_wiki` | Wiki health report: schema violations, stale concepts, unsupported claims, duplicates |
|
|
171
|
+
| `list_concepts` | List wiki concepts by status (stable / proposed / deprecated) |
|
|
172
|
+
| `set_concept_status` | Override: approve/deprecate/delete a concept (escape hatch) |
|
|
173
|
+
| `read_wiki` | Read INDEX.md / a concept article / a source summary |
|
|
174
|
+
|
|
175
|
+
## File Structure
|
|
176
|
+
|
|
177
|
+
```
|
|
178
|
+
Quant_LLM_Wiki/
|
|
179
|
+
├── pyproject.toml # Package metadata + `qlw` console_script entry point
|
|
180
|
+
├── requirements.txt # Python dependencies (kept for non-pip-install users)
|
|
181
|
+
├── llm_config.example.env # Example LLM provider config
|
|
182
|
+
├── README.md
|
|
183
|
+
├── LICENSE
|
|
184
|
+
├── kb.py # Wiki-first KB CLI: ingest | query | lint | compile | embed
|
|
185
|
+
├── ingest_source.py # Unified ingest dispatcher (WeChat / web / PDF / HTML)
|
|
186
|
+
├── _wechat.py # WeChat-specific extraction
|
|
187
|
+
├── _web_extract.py # Generic web extraction (trafilatura)
|
|
188
|
+
├── _pdf_extract.py # PDF extraction (pypdf)
|
|
189
|
+
├── _code_math.py # Code/math preservation utilities
|
|
190
|
+
├── wiki_schemas.py # ConceptArticle / SourceSummary dataclasses
|
|
191
|
+
├── wiki_seed.py # Seed taxonomy + bootstrap
|
|
192
|
+
├── wiki_state.py # Machine state manifest + scoring (freshness decay etc.)
|
|
193
|
+
├── wiki_compile.py # compile_wiki orchestrator (schema-injected, soft-error)
|
|
194
|
+
├── wiki_compile_llm.py # assign_concepts + recompile_concept LLM wrappers
|
|
195
|
+
├── wiki_index.py # INDEX.md generator
|
|
196
|
+
├── wiki_lint.py # Schema enforcement + health checks + auto_fix
|
|
197
|
+
├── wiki_maintain.py # append_query_log + run_maintenance (Steps 6 + 7)
|
|
198
|
+
├── quant_llm_wiki/ # Restructured Python package (qlib-style)
|
|
199
|
+
│ ├── __init__.py
|
|
200
|
+
│ ├── cli.py # `qlw` dispatcher
|
|
201
|
+
│ ├── shared.py # Shared utilities, LLM HTTP client, paths, frontmatter
|
|
202
|
+
│ ├── ingest/
|
|
203
|
+
│ │ └── wechat.py # WeChat-specific ingest
|
|
204
|
+
│ ├── enrich.py # LLM enrichment pipeline
|
|
205
|
+
│ ├── embed.py # ChromaDB substrate over raw/ + wiki/
|
|
206
|
+
│ ├── sync.py # Article status-based file sync
|
|
207
|
+
│ ├── query/
|
|
208
|
+
│ │ ├── brainstorm.py # query (ask | brainstorm) — wiki-first retrieval
|
|
209
|
+
│ │ └── rethink.py # Post-generation novelty + quality validation
|
|
210
|
+
│ └── agent/ # LangGraph agent layer
|
|
211
|
+
│ ├── cli.py # Interactive ReAct agent CLI
|
|
212
|
+
│ ├── graph.py
|
|
213
|
+
│ ├── prompts.py
|
|
214
|
+
│ └── tools.py
|
|
215
|
+
├── raw/ # Incoming source articles, flat (one dir per article)
|
|
216
|
+
├── wiki/ # LLM-built Markdown memory
|
|
217
|
+
│ ├── INDEX.md # auto-maintained TOC
|
|
218
|
+
│ ├── state.json # content hashes, concept scores, retrieval hints
|
|
219
|
+
│ ├── lint_report.json # last health audit
|
|
220
|
+
│ ├── maintenance_report.md # last `kb lint --maintain` output
|
|
221
|
+
│ ├── concepts/ # one .md per concept
|
|
222
|
+
│ ├── sources/ # one .md per raw article (mechanically derived)
|
|
223
|
+
│ └── queries/ # one .md per filed `kb query` (Step 7 feedback log)
|
|
224
|
+
├── schema/ # Rules followed by LLM and tools
|
|
225
|
+
│ ├── concept-schema.md
|
|
226
|
+
│ ├── source-schema.md
|
|
227
|
+
│ ├── wiki-structure.md
|
|
228
|
+
│ └── operations.md
|
|
229
|
+
├── templates/ # Article markdown templates (research-note / strategy-note)
|
|
230
|
+
├── tests/ # unittest suite
|
|
231
|
+
│ ├── robustness/ # Edge-case tests (Layer 1–4)
|
|
232
|
+
│ ├── test_kb_cli.py # kb.py CLI dispatch
|
|
233
|
+
│ ├── test_query_wiki_first_ask.py
|
|
234
|
+
│ ├── test_wiki_lint_schema.py # Schema enforcement + auto_fix
|
|
235
|
+
│ ├── test_wiki_maintain.py # Query feedback + maintenance
|
|
236
|
+
│ └── test_*.py # Per-module coverage
|
|
237
|
+
└── docs/ # Design specs and usage guides
|
|
238
|
+
```
|
|
239
|
+
|
|
240
|
+
> **Repo / package / command names.** Repo: `Quant_LLM_Wiki`. Package: `quant_llm_wiki`. Console command: `qlw` (installed via `pip install -e .`). The wiki-first KB workflow (`raw/`, `wiki/`, `schema/`) remains driven by `kb.py`; the standalone scripts (enrichment, embedding, brainstorm, agent, sync, single-source ingest) are now subcommands of `qlw`.
|
|
241
|
+
|
|
242
|
+
### Command Renaming (vs. previous versions)
|
|
243
|
+
|
|
244
|
+
The standalone scripts at the repo root have moved into `quant_llm_wiki/` and are dispatched through a single `qlw` CLI:
|
|
245
|
+
|
|
246
|
+
| Old | New |
|
|
247
|
+
|-----|-----|
|
|
248
|
+
| `python3 ingest_source.py --url X` | `qlw ingest --url X` |
|
|
249
|
+
| `python3 enrich_articles_with_llm.py --limit 10` | `qlw enrich --limit 10` |
|
|
250
|
+
| `qlw embed` | `qlw embed` |
|
|
251
|
+
| `qlw sync` | `qlw sync` |
|
|
252
|
+
| `qlw ask --query Q` | `qlw ask --query Q` |
|
|
253
|
+
| `python3 brainstorm_from_kb.py --query Q` | `qlw brainstorm --query Q` |
|
|
254
|
+
| `qlw agent` | `qlw agent` |
|
|
255
|
+
|
|
256
|
+
Install with `pip install -e .` to put `qlw` on PATH; otherwise use `python -m quant_llm_wiki.cli <subcmd>`. The `kb.py` wiki-first CLI is unchanged.
|
|
257
|
+
|
|
258
|
+
## Quick Start
|
|
259
|
+
|
|
260
|
+
### 1. Install
|
|
261
|
+
|
|
262
|
+
The recommended way to install is via [`pipx`](https://pipx.pypa.io/), which gives you the `qlw` command globally without polluting your system Python and without requiring you to activate a venv:
|
|
263
|
+
|
|
264
|
+
```bash
|
|
265
|
+
# From PyPI (once published)
|
|
266
|
+
pipx install quant-llm-wiki
|
|
267
|
+
|
|
268
|
+
# Or directly from GitHub (always tracks main)
|
|
269
|
+
pipx install git+https://github.com/jackwu321/Quant_LLM_Wiki.git
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
After install, `qlw` is on your PATH from any shell. Upgrade later with `pipx upgrade quant-llm-wiki`.
|
|
273
|
+
|
|
274
|
+
<details>
|
|
275
|
+
<summary>Alternative: clone for development</summary>
|
|
276
|
+
|
|
277
|
+
If you want to hack on the code, clone and install in editable mode:
|
|
278
|
+
|
|
279
|
+
```bash
|
|
280
|
+
git clone https://github.com/jackwu321/Quant_LLM_Wiki.git
|
|
281
|
+
cd Quant_LLM_Wiki
|
|
282
|
+
|
|
283
|
+
python3 -m venv .venv
|
|
284
|
+
source .venv/bin/activate
|
|
285
|
+
pip install -e .
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
</details>
|
|
289
|
+
|
|
290
|
+
### 2. Configure LLM Provider
|
|
291
|
+
|
|
292
|
+
Copy the example config and fill in your API key:
|
|
293
|
+
|
|
294
|
+
```bash
|
|
295
|
+
cp llm_config.example.env .env
|
|
296
|
+
# Edit .env with your API key and provider settings
|
|
297
|
+
```
|
|
298
|
+
|
|
299
|
+
Or set environment variables directly:
|
|
300
|
+
|
|
301
|
+
```bash
|
|
302
|
+
export LLM_API_KEY="your-api-key"
|
|
303
|
+
export LLM_BASE_URL="https://open.bigmodel.cn/api/paas/v4" # or any OpenAI-compatible endpoint
|
|
304
|
+
export LLM_MODEL="glm-4.7" # or gpt-4, deepseek-chat, etc.
|
|
305
|
+
```
|
|
306
|
+
|
|
307
|
+
See [llm_config.example.env](llm_config.example.env) for provider-specific examples (DeepSeek, Moonshot, Qwen, OpenAI, Ollama).
|
|
308
|
+
|
|
309
|
+
### 3. Ingest, Compile, Embed (one command)
|
|
310
|
+
|
|
311
|
+
```bash
|
|
312
|
+
# Single URL — ingest + auto-compile + auto-embed
|
|
313
|
+
python3 kb.py ingest --url "https://mp.weixin.qq.com/s/..."
|
|
314
|
+
|
|
315
|
+
# Skip the auto compile/embed
|
|
316
|
+
python3 kb.py ingest --url "..." --no-compile
|
|
317
|
+
|
|
318
|
+
# Local PDF
|
|
319
|
+
python3 kb.py ingest --pdf-file paper.pdf
|
|
320
|
+
|
|
321
|
+
# Saved WeChat HTML
|
|
322
|
+
python3 kb.py ingest --html-file saved.html
|
|
323
|
+
|
|
324
|
+
# Batch from a list (one URL per line)
|
|
325
|
+
python3 kb.py ingest --url-list urls.txt
|
|
326
|
+
```
|
|
327
|
+
|
|
328
|
+
Each URL has a hard 120 s ceiling; on hit, ingest prints `TIMEOUT <url>: exceeded 120s` and (in batch mode) continues with the next URL. Override via `INGEST_URL_TIMEOUT=<seconds>`. Note: a timed-out URL may leave a partial `raw/<date>_*/` directory behind (same as ordinary `FAILED` cases).
|
|
329
|
+
|
|
330
|
+
`enrich_articles_with_llm.py` remains a separate step (run before `kb compile` if your raw articles need LLM-derived metadata first):
|
|
331
|
+
|
|
332
|
+
```bash
|
|
333
|
+
qlw enrich # all raw articles (concurrent)
|
|
334
|
+
qlw enrich --limit 10 # first 10 only
|
|
335
|
+
qlw enrich --concurrency 5 # 5 parallel LLM requests
|
|
336
|
+
```
|
|
337
|
+
|
|
338
|
+
Each article enrichment has a hard 360 s ceiling; on hit, the article is recorded as `failed: timeout: exceeded Ns` and the batch continues. Override via `LLM_ARTICLE_TIMEOUT=<seconds>`. Start / done / TIMEOUT / `[llm-retry]` events are printed to **stderr** (separate from the per-completion `[i/N] ... ok|failed` lines on stdout) so you can see what's happening even when the LLM API is slow or backing off.
|
|
339
|
+
|
|
340
|
+
### 4. Query (wiki-first)
|
|
341
|
+
|
|
342
|
+
```bash
|
|
343
|
+
# Factual Q&A — wiki concepts first, RAG fallback only
|
|
344
|
+
python3 kb.py query --mode ask --query "What momentum factors are discussed?"
|
|
345
|
+
|
|
346
|
+
# Brainstorm new ideas (with Rethink Layer + query-feedback)
|
|
347
|
+
python3 kb.py query --mode brainstorm --query "Combine momentum and volatility timing for ETF rotation"
|
|
348
|
+
|
|
349
|
+
# Show retrieved context only (dry run)
|
|
350
|
+
python3 kb.py query --mode brainstorm --query "..." --dry-run
|
|
351
|
+
|
|
352
|
+
# Run a debug query without filing it back into wiki/queries/
|
|
353
|
+
python3 kb.py query --mode ask --query "..." --no-file-back
|
|
354
|
+
```
|
|
355
|
+
|
|
356
|
+
### 5. Lint + Maintain
|
|
357
|
+
|
|
358
|
+
```bash
|
|
359
|
+
# Schema + health audit
|
|
360
|
+
python3 kb.py lint
|
|
361
|
+
|
|
362
|
+
# LLM auto-repair of schema-noncompliant concepts
|
|
363
|
+
python3 kb.py lint --fix
|
|
364
|
+
|
|
365
|
+
# Gap analysis: unmapped sources, under-supported concepts, stale concepts
|
|
366
|
+
python3 kb.py lint --maintain
|
|
367
|
+
|
|
368
|
+
# Apply query-derived state updates (idempotent)
|
|
369
|
+
python3 kb.py lint --maintain --apply
|
|
370
|
+
```
|
|
371
|
+
|
|
372
|
+
## Agent Usage
|
|
373
|
+
|
|
374
|
+
The interactive agent manages the full pipeline through natural language:
|
|
375
|
+
|
|
376
|
+
```bash
|
|
377
|
+
# Interactive mode
|
|
378
|
+
qlw agent
|
|
379
|
+
|
|
380
|
+
# Single command
|
|
381
|
+
qlw agent --query "ingest this article: https://mp.weixin.qq.com/s/..."
|
|
382
|
+
qlw agent --query "list all articles"
|
|
383
|
+
qlw agent --query "brainstorm: combine factor timing with risk parity"
|
|
384
|
+
```
|
|
385
|
+
|
|
386
|
+
### Example Agent Workflow
|
|
387
|
+
|
|
388
|
+
```
|
|
389
|
+
You: ingest these articles: url1, url2, url3
|
|
390
|
+
Agent: Ingested 3/3 articles. Auto-compiled wiki and refreshed vector index.
|
|
391
|
+
|
|
392
|
+
You: enrich the first 3 raw articles
|
|
393
|
+
Agent: [1/3] ok [2/3] ok [3/3] ok — Enriched 3/3 articles.
|
|
394
|
+
|
|
395
|
+
You: review the new articles
|
|
396
|
+
Agent: [Shows enriched articles with content types and summaries]
|
|
397
|
+
|
|
398
|
+
You: set articles 1 and 3 as high_value, article 2 as rejected (low research value)
|
|
399
|
+
Agent: Updated 3 articles. Article 2 recorded as rejected (URL noted to prevent re-ingest).
|
|
400
|
+
|
|
401
|
+
You: ingest url2 again
|
|
402
|
+
Agent: WARNING — url2 was previously rejected: "文章标题" (reason: low research value).
|
|
403
|
+
Use force=True to re-ingest.
|
|
404
|
+
|
|
405
|
+
You: brainstorm: how to combine momentum with volatility timing
|
|
406
|
+
Agent: [Wiki concepts surfaced first; complementary articles fill remaining slots]
|
|
407
|
+
[LLM generates ideas; Rethink Layer scores novelty + quality]
|
|
408
|
+
[Query filed back into wiki/queries/; cited concepts gain importance]
|
|
409
|
+
```
|
|
410
|
+
|
|
411
|
+
## Configuration
|
|
412
|
+
|
|
413
|
+
### LLM Provider
|
|
414
|
+
|
|
415
|
+
Quant_LLM_Wiki works with **any OpenAI-compatible API**. Configure via `.env` file (auto-loaded) or environment variables:
|
|
416
|
+
|
|
417
|
+
| Variable | Default | Description |
|
|
418
|
+
|----------|---------|-------------|
|
|
419
|
+
| `LLM_API_KEY` | — | Your API key |
|
|
420
|
+
| `LLM_BASE_URL` | `https://open.bigmodel.cn/api/paas/v4` | API base URL |
|
|
421
|
+
| `LLM_MODEL` | `glm-4.7` | Chat model name |
|
|
422
|
+
| `LLM_EMBEDDING_MODEL` | `embedding-3` | Embedding model name |
|
|
423
|
+
| `LLM_CONNECT_TIMEOUT` | `10` | Connection timeout (seconds) |
|
|
424
|
+
| `LLM_READ_TIMEOUT` | `120` | Read timeout (seconds) |
|
|
425
|
+
| `LLM_MAX_RETRIES` | `2` | Max retry attempts |
|
|
426
|
+
| `LLM_CONCURRENCY` | `3` | Max parallel LLM requests for enrichment |
|
|
427
|
+
|
|
428
|
+
Legacy `ZHIPU_*` prefixed variables are also supported as fallbacks.
|
|
429
|
+
|
|
430
|
+
### Content Classification
|
|
431
|
+
|
|
432
|
+
Each article is classified with exactly one `content_type`:
|
|
433
|
+
|
|
434
|
+
| Type | Description |
|
|
435
|
+
|------|-------------|
|
|
436
|
+
| `methodology` | Research frameworks, models, factor logic |
|
|
437
|
+
| `strategy` | Trading logic with entry/exit rules and backtest |
|
|
438
|
+
| `allocation` | Portfolio construction, rotation, ETF allocation |
|
|
439
|
+
| `risk_control` | Risk management, drawdown control, volatility targeting |
|
|
440
|
+
| `market_review` | Market commentary, sector reviews |
|
|
441
|
+
|
|
442
|
+
### Article Status Lifecycle
|
|
443
|
+
|
|
444
|
+
All articles live flat under `raw/`. The frontmatter `status` field is the source of truth.
|
|
445
|
+
|
|
446
|
+
| Status | Description |
|
|
447
|
+
|--------|-------------|
|
|
448
|
+
| `raw` | Ingested, pending enrichment and review |
|
|
449
|
+
| `reviewed` | Human-reviewed; included in wiki compilation and vector index |
|
|
450
|
+
| `high_value` | High research value; included in wiki compilation and vector index |
|
|
451
|
+
| `rejected` | Low value — removed from KB, source URL recorded to prevent re-ingestion |
|
|
452
|
+
|
|
453
|
+
## Running Tests
|
|
454
|
+
|
|
455
|
+
### Unit Tests
|
|
456
|
+
|
|
457
|
+
```bash
|
|
458
|
+
python3 -m unittest discover -s tests -p 'test_*.py' -v
|
|
459
|
+
```
|
|
460
|
+
|
|
461
|
+
### Robustness Tests
|
|
462
|
+
|
|
463
|
+
The `tests/robustness/` suite covers edge cases and failure modes across four layers:
|
|
464
|
+
|
|
465
|
+
| File | What it tests |
|
|
466
|
+
|------|---------------|
|
|
467
|
+
| `test_layer1_tool_robustness.py` | Agent tools with malformed/missing inputs |
|
|
468
|
+
| `test_layer2_workflow_integration.py` | End-to-end pipeline with bad data |
|
|
469
|
+
| `test_layer3_agent_routing.py` | Agent routing under unexpected queries |
|
|
470
|
+
| `test_layer4_llm_api_robustness.py` | LLM API timeouts, retries, and failures |
|
|
471
|
+
|
|
472
|
+
```bash
|
|
473
|
+
python3 -m unittest discover -s tests/robustness -p 'test_*.py' -v
|
|
474
|
+
```
|
|
475
|
+
|
|
476
|
+
## Design Principles
|
|
477
|
+
|
|
478
|
+
- **Wiki-first, RAG-as-substrate** — Both `kb query --mode ask` and `--mode brainstorm` retrieve stable wiki concepts before vectors. ChromaDB runs only as fallback when the wiki is empty/sparse or `audit_wiki` reports degradation.
|
|
479
|
+
- **Three durable verbs** — `kb ingest`, `kb query`, `kb lint` per Karpathy's prescription. `compile` and `embed` are internal operations auto-run by `ingest`.
|
|
480
|
+
- **Schema is enforced** — `schema/concept-schema.md` and `schema/source-schema.md` define required frontmatter fields, valid enums, and required section headers. `wiki_lint` checks these on every run; `kb lint --fix` runs an LLM auto-repair pass.
|
|
481
|
+
- **Inspiration over execution** — The knowledge base serves idea combination, not backtested trading signals.
|
|
482
|
+
- **Hybrid memory: Markdown + structured state** — Markdown is the inspectable interface; `wiki/state.json` and ChromaDB metadata are the operational substrate (scoring, freshness decay, conflict tracking).
|
|
483
|
+
- **Per-claim provenance** — Every bullet in a concept article ends with `[<source_basename>]`; un-anchored bullets fail lint and lower confidence.
|
|
484
|
+
- **Content-hash idempotency** — `kb compile` reruns produce zero LLM calls when source hashes are unchanged (no `mtime`, no date guessing).
|
|
485
|
+
- **Queries compound** — Every `kb query` files into `wiki/queries/` and bumps state.json scoring for cited concepts. `kb lint --maintain` distills the query log into proposed concept-page improvements.
|
|
486
|
+
- **Complementary retrieval** — Wiki concepts surface first, then complementary article chunks fill remaining slots (excluding sources already cited by concepts).
|
|
487
|
+
- **Graceful degradation** — Every component handles missing dependencies without crashing; errors reported by `audit_wiki` demote the wiki-first path to an article-only fallback.
|
|
488
|
+
- **Self-healing vector store** — Automatic SQLite integrity check before each ChromaDB operation; corrupted stores are cleaned up and rebuilt transparently.
|
|
489
|
+
|
|
490
|
+
## Releasing (maintainers)
|
|
491
|
+
|
|
492
|
+
This repo publishes to PyPI automatically when a `v*.*.*` tag is pushed. The workflow is defined in [`.github/workflows/publish.yml`](.github/workflows/publish.yml) and uses [PyPI Trusted Publishing](https://docs.pypi.org/trusted-publishers/) (OIDC) — no API token is stored in GitHub secrets.
|
|
493
|
+
|
|
494
|
+
### One-time PyPI setup
|
|
495
|
+
|
|
496
|
+
Before the first release, configure a "pending publisher" on PyPI:
|
|
497
|
+
|
|
498
|
+
1. Log in to https://pypi.org/manage/account/publishing/
|
|
499
|
+
2. Add a pending publisher with:
|
|
500
|
+
- **PyPI Project Name:** `quant-llm-wiki`
|
|
501
|
+
- **Owner:** `jackwu321`
|
|
502
|
+
- **Repository name:** `Quant_LLM_Wiki`
|
|
503
|
+
- **Workflow filename:** `publish.yml`
|
|
504
|
+
- **Environment name:** `pypi`
|
|
505
|
+
3. In GitHub repo settings → Environments, create an environment named `pypi` (no secrets needed; OIDC handles auth).
|
|
506
|
+
|
|
507
|
+
### Cutting a release
|
|
508
|
+
|
|
509
|
+
```bash
|
|
510
|
+
# 1. Bump version in pyproject.toml (e.g. 0.2.0 -> 0.2.1)
|
|
511
|
+
# 2. Commit
|
|
512
|
+
git commit -am "release: v0.2.1"
|
|
513
|
+
# 3. Tag and push
|
|
514
|
+
git tag v0.2.1
|
|
515
|
+
git push origin main --tags
|
|
516
|
+
```
|
|
517
|
+
|
|
518
|
+
The workflow will:
|
|
519
|
+
1. Verify the tag matches `project.version` in `pyproject.toml`
|
|
520
|
+
2. Build sdist + wheel
|
|
521
|
+
3. Upload to PyPI via Trusted Publishing
|
|
522
|
+
|
|
523
|
+
Users then upgrade with `pipx upgrade quant-llm-wiki`.
|
|
524
|
+
|
|
525
|
+
> **Versioning.** Follow [SemVer](https://semver.org/): bump patch for fixes, minor for new features, major for breaking changes. The tag `v0.2.1` must match `version = "0.2.1"` in `pyproject.toml` exactly, or the workflow aborts before publishing.
|
|
526
|
+
|
|
527
|
+
## Contributing
|
|
528
|
+
|
|
529
|
+
Contributions are welcome! Please:
|
|
530
|
+
|
|
531
|
+
1. Fork the repository
|
|
532
|
+
2. Create a feature branch (`git checkout -b feature/amazing-feature`)
|
|
533
|
+
3. Write tests for new functionality
|
|
534
|
+
4. Ensure all tests pass (`python3 -m unittest discover -s tests -p 'test_*.py'`)
|
|
535
|
+
5. Commit your changes
|
|
536
|
+
6. Open a Pull Request
|
|
537
|
+
|
|
538
|
+
## License
|
|
539
|
+
|
|
540
|
+
This project is licensed under the MIT License — see the [LICENSE](LICENSE) file for details.
|
|
541
|
+
|
|
542
|
+
## Disclaimer
|
|
543
|
+
|
|
544
|
+
Quant_LLM_Wiki is a research tool for generating investment strategy ideas. It does **not** produce trade-ready strategies or financial advice. All generated ideas require independent validation, backtesting, and risk assessment before any real-world application. Use at your own risk.
|