deepresearch-flow 0.6.1__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,259 @@
+ """Unpack snapshot to recover original files with readable names.
+
+ This is the reverse operation of builder.build_snapshot().
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ import hashlib
+ import json
+ from pathlib import Path
+ import re
+ import sqlite3
+ from typing import Any, Iterable
+
+ from rich.console import Console
+ from rich.table import Table
+
+
+ @dataclass(frozen=True)
+ class SnapshotUnpackBaseOptions:
+     snapshot_db: Path
+     static_export_dir: Path
+     pdf_roots: list[Path]
+
+
+ @dataclass(frozen=True)
+ class SnapshotUnpackMdOptions(SnapshotUnpackBaseOptions):
+     md_output_dir: Path
+     md_translated_output_dir: Path
+
+
+ @dataclass(frozen=True)
+ class SnapshotUnpackInfoOptions(SnapshotUnpackBaseOptions):
+     template: str
+     output_json: Path
+
+
+ @dataclass
+ class UnpackCounts:
+     total: int = 0
+     succeeded: int = 0
+     failed: int = 0
+     missing_pdf: int = 0
+     translated_succeeded: int = 0
+     translated_failed: int = 0
+
+
+ def _sanitize_filename(title: str) -> str:
+     """Convert title to safe filename."""
+     sanitized = re.sub(r'[<>:"/\\|?*]', "_", title)
+     if len(sanitized) > 200:
+         sanitized = sanitized[:200]
+     sanitized = sanitized.strip()
+     if not sanitized:
+         sanitized = "untitled"
+     return sanitized
+
+
+ def _hash_file(path: Path) -> str:
+     digest = hashlib.sha256()
+     with path.open("rb") as handle:
+         for chunk in iter(lambda: handle.read(1024 * 1024), b""):
+             digest.update(chunk)
+     return digest.hexdigest()
+
+
+ def _build_pdf_hash_index(pdf_roots: Iterable[Path]) -> dict[str, Path]:
+     index: dict[str, Path] = {}
+     for root in pdf_roots:
+         if root.is_file() and root.suffix.lower() == ".pdf":
+             pdf_hash = _hash_file(root)
+             index.setdefault(pdf_hash, root)
+             continue
+         if not root.is_dir():
+             continue
+         for path in root.rglob("*.pdf"):
+             if not path.is_file():
+                 continue
+             pdf_hash = _hash_file(path)
+             index.setdefault(pdf_hash, path)
+     return index
+
+
+ def _unique_base_name(base: str, paper_id: str, used: set[str]) -> str:
+     candidate = base
+     if candidate in used:
+         candidate = f"{base}_{paper_id}"
+     counter = 1
+     while candidate in used:
+         candidate = f"{base}_{paper_id}_{counter}"
+         counter += 1
+     used.add(candidate)
+     return candidate
+
+
+ def _open_snapshot_db(path: Path) -> sqlite3.Connection:
+     conn = sqlite3.connect(path)
+     conn.row_factory = sqlite3.Row
+     return conn
+
+
+ def _print_summary(title: str, counts: UnpackCounts) -> None:
+     table = Table(title=title, header_style="bold cyan", title_style="bold magenta")
+     table.add_column("Metric", style="cyan", no_wrap=True)
+     table.add_column("Value", style="white", overflow="fold")
+     table.add_row("Total", str(counts.total))
+     table.add_row("Succeeded", str(counts.succeeded))
+     table.add_row("Failed", str(counts.failed))
+     table.add_row("Missing PDF", str(counts.missing_pdf))
+     if counts.translated_succeeded or counts.translated_failed:
+         table.add_row("Translated succeeded", str(counts.translated_succeeded))
+         table.add_row("Translated failed", str(counts.translated_failed))
+     Console().print(table)
+
+
+ def unpack_md(opts: SnapshotUnpackMdOptions) -> None:
+     """Unpack source/translated markdown and align filenames to PDFs."""
+     opts.md_output_dir.mkdir(parents=True, exist_ok=True)
+     opts.md_translated_output_dir.mkdir(parents=True, exist_ok=True)
+
+     pdf_index = _build_pdf_hash_index(opts.pdf_roots)
+     used_names: set[str] = set()
+     counts = UnpackCounts()
+
+     conn = _open_snapshot_db(opts.snapshot_db)
+     try:
+         cursor = conn.execute(
+             """
+             SELECT
+                 paper_id,
+                 title,
+                 source_hash,
+                 pdf_content_hash,
+                 source_md_content_hash
+             FROM paper
+             ORDER BY paper_index, title
+             """
+         )
+         for row in cursor.fetchall():
+             counts.total += 1
+             paper_id = str(row["paper_id"])
+             title = str(row["title"] or "")
+             pdf_hash = row["pdf_content_hash"]
+             md_hash = row["source_md_content_hash"]
+
+             base = ""
+             if pdf_hash and pdf_hash in pdf_index:
+                 base = pdf_index[pdf_hash].stem
+             else:
+                 counts.missing_pdf += 1
+                 base = _sanitize_filename(title)
+             base = _unique_base_name(base, paper_id, used_names)
+
+             if md_hash:
+                 src_md = opts.static_export_dir / "md" / f"{md_hash}.md"
+                 if src_md.exists():
+                     dst_md = opts.md_output_dir / f"{base}.md"
+                     try:
+                         dst_md.write_text(src_md.read_text(encoding="utf-8"), encoding="utf-8")
+                         counts.succeeded += 1
+                     except OSError:
+                         counts.failed += 1
+                 else:
+                     counts.failed += 1
+             else:
+                 counts.failed += 1
+
+
+             for tr_row in conn.execute(
+                 "SELECT lang, md_content_hash FROM paper_translation WHERE paper_id = ?",
+                 (paper_id,),
+             ):
+                 lang = str(tr_row["lang"] or "").lower()
+                 tr_hash = tr_row["md_content_hash"]
+                 if not lang or not tr_hash:
+                     counts.translated_failed += 1
+                     continue
+                 src_tr = opts.static_export_dir / "md_translate" / lang / f"{tr_hash}.md"
+                 if not src_tr.exists():
+                     counts.translated_failed += 1
+                     continue
+                 dst_tr = opts.md_translated_output_dir / f"{base}.{lang}.md"
+                 try:
+                     dst_tr.write_text(src_tr.read_text(encoding="utf-8"), encoding="utf-8")
+                     counts.translated_succeeded += 1
+                 except OSError:
+                     counts.translated_failed += 1
+     finally:
+         conn.close()
+
+     _print_summary("snapshot unpack md summary", counts)
+
+
+ def unpack_info(opts: SnapshotUnpackInfoOptions) -> None:
+     """Unpack aggregated paper_infos.json from snapshot summaries."""
+     pdf_index = _build_pdf_hash_index(opts.pdf_roots)
+     counts = UnpackCounts()
+     items: list[dict[str, Any]] = []
+
+     conn = _open_snapshot_db(opts.snapshot_db)
+     try:
+         cursor = conn.execute(
+             """
+             SELECT
+                 paper_id,
+                 title,
+                 source_hash,
+                 pdf_content_hash
+             FROM paper
+             ORDER BY paper_index, title
+             """
+         )
+         for row in cursor.fetchall():
+             counts.total += 1
+             paper_id = str(row["paper_id"])
+             pdf_hash = row["pdf_content_hash"]
+             if not (pdf_hash and pdf_hash in pdf_index):
+                 counts.missing_pdf += 1
+
+             summary_path = opts.static_export_dir / "summary" / paper_id / f"{opts.template}.json"
+             fallback_path = opts.static_export_dir / "summary" / f"{paper_id}.json"
+             target_path = summary_path if summary_path.exists() else fallback_path
+             used_fallback = target_path == fallback_path
+             if not target_path.exists():
+                 counts.failed += 1
+                 continue
+             try:
+                 payload = json.loads(target_path.read_text(encoding="utf-8"))
+             except json.JSONDecodeError:
+                 counts.failed += 1
+                 continue
+             if not isinstance(payload, dict):
+                 counts.failed += 1
+                 continue
+
+             base = ""
+             if pdf_hash and pdf_hash in pdf_index:
+                 base = pdf_index[pdf_hash].stem
+             else:
+                 base = _sanitize_filename(str(row["title"] or ""))
+             source_path = f"{base}.md" if base else ""
+
+             payload["paper_id"] = paper_id
+             payload["paper_title"] = str(row["title"] or "")
+             payload["source_path"] = source_path
+             payload["source_hash"] = str(row["source_hash"] or "")
+
+             if used_fallback:
+                 counts.failed += 1
+             else:
+                 counts.succeeded += 1
+             items.append(payload)
+     finally:
+         conn.close()
+
+     opts.output_json.parent.mkdir(parents=True, exist_ok=True)
+     opts.output_json.write_text(json.dumps(items, ensure_ascii=False, indent=2), encoding="utf-8")
+     _print_summary("snapshot unpack info summary", counts)
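The hunk above adds a new module, listed in the RECORD section below as `deepresearch_flow/paper/snapshot/unpacker.py`. For orientation, here is a minimal sketch of driving it directly from Python; the import path is taken from RECORD, the filesystem paths are placeholders, and the CLI wiring for these helpers is not visible in this diff, so treat it as an illustration rather than the package's documented interface.

```python
# Hypothetical driver for the unpacker added in 0.7.1 (sketch, not taken from the package docs).
from pathlib import Path

from deepresearch_flow.paper.snapshot.unpacker import SnapshotUnpackMdOptions, unpack_md

opts = SnapshotUnpackMdOptions(
    snapshot_db=Path("papers.db"),            # snapshot SQLite database
    static_export_dir=Path("static-export"),  # directory holding md/ and md_translate/<lang>/
    pdf_roots=[Path("pdfs")],                 # roots scanned so recovered markdown reuses PDF filenames
    md_output_dir=Path("recovered/md"),
    md_translated_output_dir=Path("recovered/md_translated"),
)
unpack_md(opts)  # writes <base>.md and <base>.<lang>.md, then prints a Rich summary table
```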
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: deepresearch-flow
- Version: 0.6.1
+ Version: 0.7.1
  Summary: Workflow tools for paper extraction, review, and research automation.
  Author-email: DengQi <dengqi935@gmail.com>
  License: MIT License
@@ -49,6 +49,7 @@ Requires-Dist: jinja2>=3.1.3
  Requires-Dist: json-repair>=0.55.1
  Requires-Dist: jsonschema>=4.26.0
  Requires-Dist: markdown-it-py>=3.0.0
+ Requires-Dist: fastmcp>=3.0.0b1
  Requires-Dist: mdit-py-plugins>=0.4.0
  Requires-Dist: pypdf>=6.6.2
  Requires-Dist: pylatexenc>=2.10
@@ -56,7 +57,7 @@ Requires-Dist: pybtex>=0.24.0
  Requires-Dist: rich>=14.3.1
  Requires-Dist: rumdl>=0.1.6
  Requires-Dist: starlette>=0.52.1
- Requires-Dist: tqdm>=4.66.4
+ Requires-Dist: tqdm>=4.67.2
  Requires-Dist: uvicorn>=0.27.1
  Dynamic: license-file

@@ -519,7 +520,14 @@ server {
  }

  location /api/ {
- proxy_pass http://127.0.0.1:8001/;
+ proxy_pass http://127.0.0.1:8001;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+
+ location ^~ /mcp {
+ proxy_pass http://127.0.0.1:8001;
  proxy_set_header Host $host;
  proxy_set_header X-Real-IP $remote_addr;
  proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -554,6 +562,147 @@ uv run deepresearch-flow paper db api serve \
  --host 0.0.0.0 --port 8001
  ```

+ ### 3.1) MCP (FastMCP Streamable HTTP)
+
+ This project exposes an MCP server mounted at `/mcp` on the snapshot API:
+
+ - Endpoint: `http://<host>:8001/mcp` (same host/port as `paper db api serve`)
+ - Transport: Streamable HTTP via `POST` only (no SSE; `GET` returns 405)
+ - Protocol header: optional `mcp-protocol-version` (`2025-03-26` or `2025-06-18`)
+ - Static reads: summary/source/translation are served as **text content** by reading snapshot static assets (local-first via `PAPER_DB_STATIC_EXPORT_DIR`, HTTP fallback via `PAPER_DB_STATIC_BASE` / `PAPER_DB_STATIC_BASE_URL`)
+
+ Optional (avoid HTTP fetch by reading exported assets directly on the API host):
+
+ ```bash
+ export PAPER_DB_STATIC_EXPORT_DIR=/data/paper-static
+ ```
+
+ #### MCP Tools (API functions)
+
+ <details>
+ <summary><strong>search_papers(query, limit=10)</strong> — full-text search (relevance-ranked)</summary>
+
+ - Args:
+ - `query` (str): keywords / topic query
+ - `limit` (int): number of results (clamped to API max page size)
+ - Returns: list of `{ paper_id, title, year, venue, snippet_markdown }`
+
+ </details>
+
+ <details>
+ <summary><strong>search_papers_by_keyword(keyword, limit=10)</strong> — facet keyword search</summary>
+
+ - Args:
+ - `keyword` (str): keyword substring
+ - `limit` (int): number of results (clamped)
+ - Returns: list of `{ paper_id, title, year, venue, snippet_markdown }`
+
+ </details>
+
+ <details>
+ <summary><strong>get_paper_metadata(paper_id)</strong> — metadata + available summary templates</summary>
+
+ - Args:
+ - `paper_id` (str)
+ - Returns: dict with:
+ - `paper_id`, `title`, `year`, `venue`
+ - `doi`, `arxiv_id`, `openreview_id`, `paper_pw_url`
+ - `preferred_summary_template`, `available_summary_templates`
+
+ </details>
+
+ <details>
+ <summary><strong>get_paper_summary(paper_id, template=None, max_chars=None)</strong> — summary JSON as raw text</summary>
+
+ - Notes:
+ - Uses `preferred_summary_template` if `template` is omitted
+ - Returns the **full JSON content** (not a URL)
+ - Args:
+ - `paper_id` (str)
+ - `template` (str | null)
+ - `max_chars` (int | null): truncation limit
+ - Returns: JSON string (may include a `[truncated: ...]` marker)
+
+ </details>
+
+ <details>
+ <summary><strong>get_paper_source(paper_id, max_chars=None)</strong> — source markdown as raw text</summary>
+
+ - Args:
+ - `paper_id` (str)
+ - `max_chars` (int | null): truncation limit
+ - Returns: markdown string (may include a `[truncated: ...]` marker)
+
+ </details>
+
+ <details>
+ <summary><strong>get_database_stats()</strong> — snapshot-level stats</summary>
+
+ - Returns:
+ - `total`
+ - `years`, `months`: list of `{ value, paper_count }`
+ - `authors`, `venues`, `institutions`, `keywords`, `tags`: top lists of `{ value, paper_count }`
+
+ </details>
+
+ <details>
+ <summary><strong>list_top_facets(category, limit=20)</strong> — top values for one facet</summary>
+
+ - Args:
+ - `category`: `author | venue | keyword | institution | tag`
+ - `limit` (int)
+ - Returns: list of `{ value, paper_count }`
+
+ </details>
+
+ <details>
+ <summary><strong>filter_papers(author=None, venue=None, year=None, keyword=None, tag=None, limit=10)</strong> — structured filtering</summary>
+
+ - Args (all optional except `limit`):
+ - `author`, `venue`, `keyword`, `tag`: substring match
+ - `year`: exact match
+ - `limit` (int): number of results (clamped)
+ - Returns: list of `{ paper_id, title, year, venue }`
+
+ </details>
+
+ #### MCP Resources (URI access)
+
+ <details>
+ <summary><strong>paper://{paper_id}/metadata</strong> — metadata JSON</summary>
+
+ Returns the same content as `get_paper_metadata(paper_id)` (as a JSON string).
+
+ </details>
+
+ <details>
+ <summary><strong>paper://{paper_id}/summary</strong> — preferred summary JSON</summary>
+
+ Returns the same content as `get_paper_summary(paper_id)` (preferred template; JSON string).
+
+ </details>
+
+ <details>
+ <summary><strong>paper://{paper_id}/summary/{template}</strong> — summary JSON for template</summary>
+
+ Returns the same content as `get_paper_summary(paper_id, template=template)` (JSON string).
+
+ </details>
+
+ <details>
+ <summary><strong>paper://{paper_id}/source</strong> — source markdown</summary>
+
+ Returns the same content as `get_paper_source(paper_id)` (markdown string).
+
+ </details>
+
+ <details>
+ <summary><strong>paper://{paper_id}/translation/{lang}</strong> — translated markdown</summary>
+
+ Returns translated markdown for `lang` (e.g. `zh`, `ja`) when available.
+
+ </details>
+
  ### 4) Frontend (static build or dev)

  ```bash
@@ -810,7 +959,7 @@ docker run --rm -p 8899:8899 \
  ```

  Notes:
- - nginx listens on 8899 and proxies `/api` to the internal API at `127.0.0.1:8000`.
+ - nginx listens on 8899 and proxies `/api` and `/mcp` to the internal API at `127.0.0.1:8000`.
  - Mount your snapshot DB to `/db/papers.db` inside the container.
  - Mount snapshot static assets to `/static` when serving assets from this container (default `PAPER_DB_STATIC_BASE` is `/static`).
  - If `PAPER_DB_STATIC_BASE` is a full URL (e.g. `https://static.example.com`), nginx still serves the frontend locally, while API responses use that external static base for asset links.
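The notes above, together with the MCP section added earlier in this METADATA diff, describe a Streamable HTTP endpoint at `/mcp` that accepts `POST` only. Below is a minimal sketch of the first message a client would send, assuming the API from the `paper db api serve` example is listening on `localhost:8001`; a full MCP client (for example the official Python SDK) would also propagate the returned session id and send the follow-up `notifications/initialized` and `tools/call` messages, which are omitted here.

```python
# Sketch of an MCP "initialize" request over Streamable HTTP (assumed host/port; not package code).
import requests

resp = requests.post(
    "http://localhost:8001/mcp",
    headers={
        "Content-Type": "application/json",
        # Streamable HTTP clients must accept both plain JSON and SSE-framed replies.
        "Accept": "application/json, text/event-stream",
    },
    json={
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": {
            "protocolVersion": "2025-03-26",
            "capabilities": {},
            "clientInfo": {"name": "example-client", "version": "0.1"},
        },
    },
    timeout=30,
)
print(resp.status_code, resp.headers.get("mcp-session-id"))
print(resp.text)  # InitializeResult, either as JSON or as an SSE event stream
```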
@@ -1,10 +1,10 @@
- deepresearch_flow/__init__.py,sha256=rjP9ES4zJCfEN_MCDYAYPL1mNJZGjojdmbRwnZ9FlEk,83
+ deepresearch_flow/__init__.py,sha256=kes-OKst6kwiFPY1FmyOV1E5dHTyyHBCM3Iy0EdDp8g,83
  deepresearch_flow/__main__.py,sha256=Ceo0rMTOhHhwFPD-HyDDagenNsmWEzPmsdYLI7kwKVA,115
  deepresearch_flow/cli.py,sha256=t4oowCNWldL0DrVJ4d0UlRkuGU2qHej_G0mAc_quteQ,455
  deepresearch_flow/paper/__init__.py,sha256=sunaOkcgAJBrfmcaJTumcWbPGVUSGWvOv2a2Yidzy0A,43
  deepresearch_flow/paper/cli.py,sha256=68d-yccScU0yL6d7eqZVdudPO6i_in8F4v-hKDWILMo,13647
  deepresearch_flow/paper/config.py,sha256=V7z4ApPXCV1acSl2FU3nZGq6nt8uisMhm0GtOq5zzmg,12021
- deepresearch_flow/paper/db.py,sha256=Bxhrd8NCaPZ9Ijtp1uiOplwh2Uy0n2Dyn1bO0d4A9bE,91780
+ deepresearch_flow/paper/db.py,sha256=RvUN9jeoaEgLNvf8NhWYD-cgIIMZwdZRK3cq17pNWZI,94727
  deepresearch_flow/paper/db_ops.py,sha256=cb64jn2ax39i3tCS-0DYmlsJdGX3uBS2u5ncUIbUBic,73980
  deepresearch_flow/paper/extract.py,sha256=78ASAyNLfCl1AsAk2o_v1vskZCNZuayaCHgr0S4V2Vs,87632
  deepresearch_flow/paper/llm.py,sha256=mHfs5IkT3Q6BOh46MDlfUmgVTX24WRf0IKKoOnN8nV8,4007
@@ -43,11 +43,14 @@ deepresearch_flow/paper/schemas/default_paper_schema.json,sha256=6h_2ayHolJj8JMn
  deepresearch_flow/paper/schemas/eight_questions_schema.json,sha256=VFKKpdZkgPdQkYIW5jyrZQ7c2TlQZwB4svVWfoiwxdg,1005
  deepresearch_flow/paper/schemas/three_pass_schema.json,sha256=8aNr4EdRiilxszIRBCC4hRNXrfIOcdnVW4Qhe6Fnh0o,689
  deepresearch_flow/paper/snapshot/__init__.py,sha256=1VLO36xxDB3J5Yoo-HH9vyI-4ev2HcivXN0sNLg8O5k,102
- deepresearch_flow/paper/snapshot/api.py,sha256=WgkOgS7n_2Fx-Bl4KnLrh5nhRJAsWJaPjXu7vX5ubxY,36960
+ deepresearch_flow/paper/snapshot/api.py,sha256=F_qehvCjxTBTGj9FmqP4NnJQayUPJm0N5e_8mm5JlDQ,37405
  deepresearch_flow/paper/snapshot/builder.py,sha256=HbRcfNteMoP4RnQ4y2onZCm9XfnIvzXLn_EwsLZsDzY,38692
+ deepresearch_flow/paper/snapshot/common.py,sha256=KAhlGlPgabOCe9Faps8BoDqin71qpkCfaL_ADCr_9vg,917
  deepresearch_flow/paper/snapshot/identity.py,sha256=k9x1EZPFBU1qgxzkTGvwVtDjLgcosmM_udPuvRLl0uI,7748
+ deepresearch_flow/paper/snapshot/mcp_server.py,sha256=lvgbXmuZCZ_zaQMdZEMjN-OChHPdoZ9MmuuQ-7ORias,22901
  deepresearch_flow/paper/snapshot/schema.py,sha256=DcVmAklLYyEeDoVV9jYw7hoMHnHd9Eziivl-LP2busY,8991
  deepresearch_flow/paper/snapshot/text.py,sha256=0RnxLowa6AdirdLsUYym6BhWbjwiP2Qj2oZeA-pjmdE,4368
+ deepresearch_flow/paper/snapshot/unpacker.py,sha256=ScKSFdrQLJHrITHe9KAxgAEH-vAAnXLolvW9zeJ3wsc,8575
  deepresearch_flow/paper/snapshot/tests/__init__.py,sha256=G0IowrxHjGUIaqxcw6SvlcLFAtE5ZsleG6ECgd-sIdk,52
  deepresearch_flow/paper/snapshot/tests/test_identity.py,sha256=KDFixAUU9l68KOum7gf1IrD0Oy18dBCSXG7RbJTqflA,4520
  deepresearch_flow/paper/templates/__init__.py,sha256=p8W6kINvrf-T2X6Ow4GMr28syVOorFuMn0pbmieVzAw,35
@@ -463,9 +466,9 @@ deepresearch_flow/translator/placeholder.py,sha256=mEgqA-dPdOsIhno0h_hzfpXpY2asb
  deepresearch_flow/translator/prompts.py,sha256=EvfBvBIpQXARDj4m87GAyFXJGL8EJeahj_rOmp9mv68,5556
  deepresearch_flow/translator/protector.py,sha256=yUMuS2FgVofK_MRXrcauLRiwNvdCCjNAnh6CcNd686o,11777
  deepresearch_flow/translator/segment.py,sha256=rBFMCLTrvm2GrPc_hNFymi-8Ih2DAtUQlZHCRE9nLaM,5146
- deepresearch_flow-0.6.1.dist-info/licenses/LICENSE,sha256=hT8F2Py1pe6flxq3Ufdm2UKFk0B8CBm0aAQfsLXfvjw,1063
- deepresearch_flow-0.6.1.dist-info/METADATA,sha256=y_CHy1YJ-3P31W43Q_fd8dEkznj7LKLRrCF6F-sGHaQ,26696
- deepresearch_flow-0.6.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- deepresearch_flow-0.6.1.dist-info/entry_points.txt,sha256=1uIKscs0YRMg_mFsg9NjsaTt4CvQqQ_-zGERUKhhL_Y,65
- deepresearch_flow-0.6.1.dist-info/top_level.txt,sha256=qBl4RvPJNJUbL8CFfMNWxY0HpQLx5RlF_ko-z_aKpm0,18
- deepresearch_flow-0.6.1.dist-info/RECORD,,
+ deepresearch_flow-0.7.1.dist-info/licenses/LICENSE,sha256=hT8F2Py1pe6flxq3Ufdm2UKFk0B8CBm0aAQfsLXfvjw,1063
+ deepresearch_flow-0.7.1.dist-info/METADATA,sha256=rSmAZMSVrjhXLo6Dte3Gaf9AvVyznUaHd-Ahwn47Ne4,31331
+ deepresearch_flow-0.7.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ deepresearch_flow-0.7.1.dist-info/entry_points.txt,sha256=1uIKscs0YRMg_mFsg9NjsaTt4CvQqQ_-zGERUKhhL_Y,65
+ deepresearch_flow-0.7.1.dist-info/top_level.txt,sha256=qBl4RvPJNJUbL8CFfMNWxY0HpQLx5RlF_ko-z_aKpm0,18
+ deepresearch_flow-0.7.1.dist-info/RECORD,,