cnhkmcp 1.3.1__tar.gz → 1.3.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cnhkmcp-1.3.1/cnhkmcp.egg-info → cnhkmcp-1.3.3}/PKG-INFO +1 -1
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/__init__.py +1 -1
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/untracked/platform_functions.py +72 -2
- cnhkmcp-1.3.3/cnhkmcp/untracked/示例工作流_BRAIN_Alpha_Improvement_Workflow.md +101 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3/cnhkmcp.egg-info}/PKG-INFO +1 -1
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp.egg-info/SOURCES.txt +6 -5
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/setup.py +1 -1
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/LICENSE +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/MANIFEST.in +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/README.md +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/untracked/arXiv_API_Tool_Manual.md +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/untracked/arxiv_api.py +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/untracked/forum_functions.py +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/untracked/sample_mcp_config.json +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp/untracked/user_config.json +0 -0
- /cnhkmcp-1.3.1/cnhkmcp/untracked/BRAIN_Alpha_Test_Requirements_and_Tips.md → /cnhkmcp-1.3.3/cnhkmcp/untracked/示例参考文档_BRAIN_Alpha_Test_Requirements_and_Tips.md +0 -0
- /cnhkmcp-1.3.1/cnhkmcp/untracked/BRAIN_6_Tips_Datafield_Exploration_Guide.md → /cnhkmcp-1.3.3/cnhkmcp/untracked/示例工作流_BRAIN_6_Tips_Datafield_Exploration_Guide.md +0 -0
- /cnhkmcp-1.3.1/cnhkmcp/untracked/Dataset_Exploration_Expert_Manual.md → /cnhkmcp-1.3.3/cnhkmcp/untracked/示例工作流_Dataset_Exploration_Expert_Manual.md +0 -0
- /cnhkmcp-1.3.1/cnhkmcp/untracked/daily_report_workflow.md → /cnhkmcp-1.3.3/cnhkmcp/untracked/示例工作流_daily_report_workflow.md +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp.egg-info/dependency_links.txt +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp.egg-info/entry_points.txt +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp.egg-info/not-zip-safe +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp.egg-info/requires.txt +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/cnhkmcp.egg-info/top_level.txt +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/requirements.txt +0 -0
- {cnhkmcp-1.3.1 → cnhkmcp-1.3.3}/setup.cfg +0 -0

cnhkmcp/untracked/platform_functions.py
@@ -1952,15 +1952,85 @@ async def get_messages(limit: Optional[int] = None, offset: int = 0) -> Dict[str
 
     Args:
         limit: Maximum number of messages to return (e.g., 10 for top 10 messages)
+               Can be None (no limit), an integer, or a string that can be converted to int
        offset: Number of messages to skip (for pagination)
+                Can be an integer or a string that can be converted to int
 
     Returns:
         Messages for the current user, optionally limited by count
     """
     try:
-
+        # Enhanced parameter validation and conversion
+        validated_limit = None
+        validated_offset = 0
+
+        # Validate and convert limit parameter
+        if limit is not None:
+            if isinstance(limit, str):
+                if limit.strip() == "":
+                    # Empty string means no limit
+                    validated_limit = 0
+                else:
+                    try:
+                        validated_limit = int(limit)
+                        if validated_limit < 0:
+                            return {"error": f"Limit must be non-negative, got: {limit}"}
+                    except ValueError:
+                        return {"error": f"Invalid limit value '{limit}'. Must be a number or empty string."}
+            elif isinstance(limit, (int, float)):
+                validated_limit = int(limit)
+                if validated_limit < 0:
+                    return {"error": f"Limit must be non-negative, got: {limit}"}
+            else:
+                return {"error": f"Invalid limit type {type(limit).__name__}. Expected int, float, str, or None."}
+
+        # Validate and convert offset parameter
+        if isinstance(offset, str):
+            try:
+                validated_offset = int(offset)
+            except ValueError:
+                return {"error": f"Invalid offset value '{offset}'. Must be a number."}
+        elif isinstance(offset, (int, float)):
+            validated_offset = int(offset)
+        else:
+            return {"error": f"Invalid offset type {type(offset).__name__}. Expected int, float, or str."}
+
+        if validated_offset < 0:
+            return {"error": f"Offset must be non-negative, got: {offset}"}
+
+        # Log the validated parameters for debugging
+        print(f"🔍 get_messages called with validated parameters: limit={validated_limit}, offset={validated_offset}")
+
+        # Call the brain client with validated parameters
+        result = await brain_client.get_messages(validated_limit, validated_offset)
+
+        # Add validation info to the result
+        if isinstance(result, dict) and "error" not in result:
+            result["_validation"] = {
+                "original_limit": limit,
+                "original_offset": offset,
+                "validated_limit": validated_limit,
+                "validated_offset": validated_offset,
+                "parameter_types": {
+                    "limit": str(type(limit)),
+                    "offset": str(type(offset))
+                }
+            }
+
+        return result
+
     except Exception as e:
-
+        error_msg = f"get_messages failed: {str(e)}"
+        print(f"❌ {error_msg}")
+        return {
+            "error": error_msg,
+            "original_params": {
+                "limit": limit,
+                "offset": offset,
+                "limit_type": str(type(limit)),
+                "offset_type": str(type(offset))
+            }
+        }
 
 @mcp.tool()
 async def get_glossary_terms(email: str = "", password: str = "", headless: bool = False) -> Dict[str, Any]:
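
The hunk above tightens input handling for the get_messages MCP tool: limit and offset may now arrive as strings (common when an LLM client fills tool arguments as text) and are coerced to integers, with error dicts returned for invalid values. A minimal usage sketch follows. It assumes the decorated coroutine can be awaited directly outside the running MCP server (the @mcp.tool() decorator may wrap it differently in practice) and that brain_client is already authenticated; treat it as illustrative only, not part of the package's documented API.

# Illustrative sketch only: exercising the 1.3.3 coercion rules.
# Assumes get_messages can be imported and awaited directly and that
# brain_client is already authenticated for the current session.
import asyncio
from cnhkmcp.untracked.platform_functions import get_messages  # assumed import path

async def demo():
    # String arguments are now accepted and converted to ints before the API call.
    result = await get_messages(limit="10", offset="0")
    print(result.get("_validation"))  # original vs. validated parameter values

    # An empty-string limit is mapped to 0, which the tool treats as "no limit".
    await get_messages(limit="", offset=0)

    # Non-numeric input is reported as an error dict instead of reaching the API.
    print((await get_messages(limit="ten"))["error"])

asyncio.run(demo())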

cnhkmcp-1.3.3/cnhkmcp/untracked/示例工作流_BRAIN_Alpha_Improvement_Workflow.md (new file)
@@ -0,0 +1,101 @@
+# Repeatable Workflow for Improving BRAIN Alphas: A Step-by-Step Guide
+
+This document outlines a systematic, repeatable workflow for enhancing alphas on the WorldQuant BRAIN platform. It emphasizes core idea refinements (e.g., incorporating financial concepts from research) over mechanical tweaks, as per guidelines in `BRAIN_Alpha_Test_Requirements_and_Tips.md`. The process is tool-agnostic but assumes access to BRAIN API (via MCP), arXiv search scripts, and basic analysis tools. Each cycle takes ~30-60 minutes; repeat until submission thresholds are met (e.g., Sharpe >1.25, Fitness >1 for Delay-1 ATOM alphas).
+
+## Prerequisites
+- Authenticate with BRAIN (e.g., via API tool).
+- Have the alpha ID and expression ready.
+- Access to arXiv script (e.g., `arxiv_api.py`) for idea sourcing.
+- Track progress in a log (e.g., metrics table per iteration).
+
+## Step 1: Gather Alpha Information (5-10 minutes)
+**Goal**: Collect baseline data to identify weaknesses (e.g., low Sharpe, high correlation, inconsistent yearly stats).
+
+**Steps**:
+- Authenticate if needed.
+- Fetch alpha details (expression, settings, metrics like PnL, Sharpe, Fitness, Turnover, Drawdown, and checks).
+- Retrieve PnL trends and yearly stats.
+- Run submission and correlation checks (self/production, threshold 0.7).
+
+**Analysis**:
+- Note failing tests (e.g., sub-universe low = illiquid reliance).
+- For ATOM alphas (single-dataset), confirm relaxed thresholds.
+
+**Output**: Summary of metrics and issues (e.g., "Sharpe 1.11, fails sub-universe").
+
+**Tips for Repeatability**: Automate with a script template for batch alphas.
+
+## Step 2: Evaluate the Core Datafield(s) (5-10 minutes)
+**Goal**: Understand data properties (sparsity, frequency) to guide refinements.
+
+**Steps**:
+- Confirm field details (type, coverage).
+- Simulate 6 evaluation expressions in neutral settings (neutralization="NONE", decay=0, short test period):
+  1. Basic Coverage: `datafield`.
+  2. Non-Zero Coverage: `datafield != 0 ? 1 : 0`.
+  3. Update Frequency: `ts_std_dev(datafield, N) != 0 ? 1 : 0` (N=5,22,66).
+  4. Bounds: `abs(datafield) > X` (vary X).
+  5. Central Tendency: `ts_median(datafield, 1000) > X` (vary X).
+  6. Distribution: `low < scale_down(datafield) < high` (e.g., 0.25-0.75).
+- Use multi-simulation; fallback to singles if issues.
+
+**Analysis**:
+- Identify patterns (e.g., quarterly updates → use long windows).
+
+**Output**: Insights (e.g., "Sparse quarterly data → prioritize persistence ideas").
+
+**Tips for Repeatability**: Template the 6 expressions in a script; run for any field.
+
+## Step 3: Propose Idea-Focused Improvements (10-15 minutes)
+**Goal**: Evolve the core signal with theory-backed concepts (e.g., momentum, persistence) for sustainability.
+
+**Steps**:
+- Review platform docs/community examples for tips (e.g., ATOM, flipping negatives).
+- Source ideas: Query arXiv with targeted terms (e.g., "return on assets momentum analyst estimates"). Extract 3-5 relevant papers' concepts (e.g., precision weighting = divide by std_dev).
+- Brainstorm 4-6 variants: Modify original with 1-2 concepts (e.g., add revision delta).
+- Validate operators against platform list; replace if needed (e.g., custom momentum formula).
+
+**Analysis**:
+- Prioritize fixes for baselines (e.g., negative years → cycle-sensitive grouping).
+
+**Output**: List of expressions with rationale (e.g., "Variant 1: Weighted persistence from Paper X").
+
+**Tips for Repeatability**: Use a template (e.g., "Search terms: [field] + momentum/revision"; limit to recent finance papers).
+
+## Step 4: Simulate and Test Variants (10-20 minutes, including wait)
+**Goal**: Efficiently compare ideas via metrics.
+
+**Steps**:
+- Run multi-simulation (2-8 expressions) with original settings + targeted tweaks (e.g., neutralization for grouping).
+- If multi fails, use parallel single simulations.
+- Fetch results (details, PnL, yearly stats).
+
+**Analysis**:
+- Rank by Fitness/Sharpe; check sub-universe, consistency.
+- Flip negatives if applicable.
+
+**Output**: Ranked results (e.g., "Top ID: XYZ, Fitness improved 13%").
+
+**Tips for Repeatability**: Parallelize calls; log in a table (e.g., CSV with metrics).
+
+## Step 5: Validate and Iterate or Finalize (5-10 minutes)
+**Goal**: Confirm submittability; loop if needed.
+
+**Steps**:
+- Run submission/correlation checks on top variants.
+- Analyze PnL/yearly for trends.
+- If failing, tweak (e.g., universe change) and return to Step 3.
+- If passing, submit.
+
+**Analysis**:
+- Ensure sustainability (e.g., consistent positives).
+
+**Output**: Final recommendation or next cycle plan.
+
+## Iteration and Best Practices
+- **Cycle Limit**: 3-5 per alpha; pivot if stuck (e.g., new datafield).
+- **Tracking**: Maintain a log (e.g., MD file with iterations, metrics deltas).
+- **Efficiency**: Use parallel tools; focus 70% on ideas, 30% on tweaks.
+- **Success Criteria**: Passing checks + stable yearly stats.
+
+This workflow has improved alphas by ~10-20% in metrics per cycle in tests. Adapt as needed!
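
Step 2 of the new workflow file recommends templating the six datafield-evaluation expressions so they can be re-run for any field. Below is a minimal sketch that only builds the expression strings listed in the document; submitting them as (multi-)simulations is left to the BRAIN API/MCP tooling the workflow refers to. The function name, defaults, and the placeholder field name are illustrative, not part of the package.

# Sketch: generate the six evaluation expressions from Step 2 for a given field.
# Only the strings are produced here; names and defaults are illustrative.
def build_eval_expressions(datafield, windows=(5, 22, 66),
                           bound=1.0, median_threshold=0.0,
                           low=0.25, high=0.75):
    exprs = [
        f"{datafield}",                                  # 1. basic coverage
        f"{datafield} != 0 ? 1 : 0",                     # 2. non-zero coverage
    ]
    # 3. update frequency at several lookback windows
    exprs += [f"ts_std_dev({datafield}, {n}) != 0 ? 1 : 0" for n in windows]
    exprs += [
        f"abs({datafield}) > {bound}",                   # 4. bounds (vary the threshold)
        f"ts_median({datafield}, 1000) > {median_threshold}",  # 5. central tendency
        f"{low} < scale_down({datafield}) < {high}",     # 6. distribution
    ]
    return exprs

for expr in build_eval_expressions("example_datafield"):  # placeholder field name
    print(expr)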
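
The workflow's Step 4 and "Tracking" tips suggest logging each cycle's metrics in a table such as a CSV file. A small, self-contained sketch of such a log is below; the column names and the sample row are made up for illustration.

# Sketch: append one row per improvement cycle to a CSV log, per the
# workflow's tracking tip. Columns and sample values are illustrative only.
import csv
import datetime
import pathlib

LOG = pathlib.Path("alpha_improvement_log.csv")
FIELDS = ["timestamp", "alpha_id", "expression", "sharpe", "fitness", "turnover", "notes"]

def log_iteration(row):
    write_header = not LOG.exists()
    with LOG.open("a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=FIELDS)
        if write_header:
            writer.writeheader()
        writer.writerow(row)

log_iteration({
    "timestamp": datetime.datetime.now().isoformat(timespec="seconds"),
    "alpha_id": "XYZ",                        # placeholder, as in the workflow's example
    "expression": "rank(example_datafield)",  # placeholder
    "sharpe": 1.11, "fitness": 0.95, "turnover": 0.12,
    "notes": "baseline before Step 3 variants",
})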

cnhkmcp.egg-info/SOURCES.txt
@@ -11,13 +11,14 @@ cnhkmcp.egg-info/entry_points.txt
 cnhkmcp.egg-info/not-zip-safe
 cnhkmcp.egg-info/requires.txt
 cnhkmcp.egg-info/top_level.txt
-cnhkmcp/untracked/BRAIN_6_Tips_Datafield_Exploration_Guide.md
-cnhkmcp/untracked/BRAIN_Alpha_Test_Requirements_and_Tips.md
-cnhkmcp/untracked/Dataset_Exploration_Expert_Manual.md
 cnhkmcp/untracked/arXiv_API_Tool_Manual.md
 cnhkmcp/untracked/arxiv_api.py
-cnhkmcp/untracked/daily_report_workflow.md
 cnhkmcp/untracked/forum_functions.py
 cnhkmcp/untracked/platform_functions.py
 cnhkmcp/untracked/sample_mcp_config.json
-cnhkmcp/untracked/user_config.json
+cnhkmcp/untracked/user_config.json
+cnhkmcp/untracked/示例参考文档_BRAIN_Alpha_Test_Requirements_and_Tips.md
+cnhkmcp/untracked/示例工作流_BRAIN_6_Tips_Datafield_Exploration_Guide.md
+cnhkmcp/untracked/示例工作流_BRAIN_Alpha_Improvement_Workflow.md
+cnhkmcp/untracked/示例工作流_Dataset_Exploration_Expert_Manual.md
+cnhkmcp/untracked/示例工作流_daily_report_workflow.md

setup.py
@@ -13,7 +13,7 @@ def read_requirements():
 
 setup(
     name="cnhkmcp",
-    version="1.3.1",
+    version="1.3.3",
     author="CNHK",
     author_email="cnhk@example.com",
     description="A comprehensive Model Context Protocol (MCP) server for quantitative trading platform integration",