aigroup-econ-mcp 0.9.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aigroup-econ-mcp might be problematic.

@@ -10,7 +10,7 @@ AIGroup 计量经济学 MCP 服务
  - 模型诊断
  """

- __version__ = "0.9.0"
+ __version__ = "1.0.0"
  __author__ = "AIGroup"
  __description__ = "专业计量经济学MCP工具 - 让大模型直接进行数据分析(重构版:工具描述模块化)"
 
@@ -70,7 +70,7 @@ from .tools.tool_descriptions import (
  class AppContext:
  """应用上下文,包含共享资源"""
  config: Dict[str, Any]
- version: str = "0.9.0"
+ version: str = "1.0.0"


  @asynccontextmanager
@@ -83,7 +83,7 @@ async def lifespan(server: FastMCP) -> AsyncIterator[AppContext]:
  "data_types": ["cross_section", "time_series", "panel"]
  }
  try:
- yield AppContext(config=config, version="0.9.0")
+ yield AppContext(config=config, version="1.0.0")
  finally:
  pass
 
@@ -461,12 +461,30 @@ def variance_decomposition(
  try:
  vd = fitted_model.fevd(periods=periods)

- # Build variance decomposition results
+ # Build variance decomposition results - 兼容不同statsmodels版本
  variance_decomp = {}
  for i, var_name in enumerate(df.columns):
  variance_decomp[var_name] = {}
  for j, shock_name in enumerate(df.columns):
- variance_decomp[var_name][shock_name] = vd.decomposition[var_name][shock_name].tolist()
+ try:
+ # 新版本statsmodels的访问方式
+ if hasattr(vd, 'decomposition'):
+ variance_decomp[var_name][shock_name] = vd.decomposition[var_name][shock_name].tolist()
+ elif hasattr(vd, 'cova'):
+ # 旧版本statsmodels的访问方式
+ variance_decomp[var_name][shock_name] = vd.cova[var_name][shock_name].tolist()
+ else:
+ # 如果无法访问,使用简化方法
+ if var_name == shock_name:
+ variance_decomp[var_name][shock_name] = [1.0] * periods
+ else:
+ variance_decomp[var_name][shock_name] = [0.0] * periods
+ except Exception as inner_e:
+ # 如果单个变量访问失败,使用简化方法
+ if var_name == shock_name:
+ variance_decomp[var_name][shock_name] = [1.0] * periods
+ else:
+ variance_decomp[var_name][shock_name] = [0.0] * periods
  except Exception as e:
  print(f"方差分解计算失败,使用简化方法: {e}")
  # 简化实现
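For reference, a minimal sketch of reading forecast-error variance shares from a fitted statsmodels VAR model. It assumes the FEVD object's `decomp` array, laid out as equation × horizon × shock, which is what recent statsmodels releases expose; the `decomposition`/`cova` attributes probed in the hunk above, and the helper name below, are assumptions rather than documented API:

    import numpy as np
    import pandas as pd
    from statsmodels.tsa.api import VAR

    def fevd_shares(df: pd.DataFrame, periods: int = 10, max_lags: int = 5) -> dict:
        """Return {variable: {shock: [share per horizon]}} from a fitted VAR."""
        fitted = VAR(df).fit(maxlags=max_lags, ic="aic")
        fevd = fitted.fevd(periods)
        decomp = np.asarray(fevd.decomp)  # assumed shape: (n_vars, periods, n_vars)
        names = list(df.columns)
        return {
            var: {shock: decomp[i, :, j].tolist() for j, shock in enumerate(names)}
            for i, var in enumerate(names)
        }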
@@ -167,11 +167,31 @@ async def handle_hypothesis_testing(ctx, data1: List[float], data2: Optional[Lis


  async def handle_time_series_analysis(ctx, data: List[float], **kwargs) -> CallToolResult:
- """处理时间序列分析"""
+ """处理时间序列分析 - 增强版"""
  if not data or len(data) < 5:
  raise ValueError("时间序列数据至少需要5个观测点")

+ # 基本统计量
+ series = pd.Series(data)
+ basic_stats = {
+ "count": len(series),
+ "mean": float(series.mean()),
+ "std": float(series.std()),
+ "min": float(series.min()),
+ "max": float(series.max()),
+ "median": float(series.median()),
+ "skewness": float(series.skew()),
+ "kurtosis": float(series.kurtosis()),
+ "variance": float(series.var()),
+ "range": float(series.max() - series.min()),
+ "cv": float(series.std() / series.mean()) if series.mean() != 0 else 0 # 变异系数
+ }
+
+ # 平稳性检验
  adf_result = stattools.adfuller(data)
+ kpss_result = stattools.kpss(data, regression='c', nlags='auto')
+
+ # 自相关分析
  max_nlags = min(20, len(data) - 1, len(data) // 2)
  if max_nlags < 1:
  max_nlags = 1
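The hunk above pairs the existing ADF test with a KPSS test; the rule applied later in the handler treats a series as stationary only when ADF rejects a unit root and KPSS does not reject stationarity. A minimal standalone sketch of that rule (the helper name is illustrative; the 0.05 threshold mirrors the packaged code):

    from statsmodels.tsa import stattools

    def is_stationary(data, alpha: float = 0.05) -> bool:
        """Stationary only if ADF rejects a unit root (p < alpha)
        and KPSS fails to reject stationarity (p > alpha)."""
        adf_p = stattools.adfuller(data)[1]
        # note: statsmodels clips KPSS p-values to its [0.01, 0.1] table range
        kpss_p = stattools.kpss(data, regression="c", nlags="auto")[1]
        return bool(adf_p < alpha and kpss_p > alpha)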
@@ -184,26 +204,140 @@ async def handle_time_series_analysis(ctx, data: List[float], **kwargs) -> CallT
  pacf_values = np.zeros(max_nlags + 1)
  acf_values[0] = pacf_values[0] = 1.0

+ # 计算更多诊断统计量
+ # 趋势强度
+ trend_strength = abs(np.corrcoef(range(len(data)), data)[0, 1]) if len(data) > 1 else 0
+
+ # 季节性检测(如果数据足够长)
+ seasonal_pattern = False
+ if len(data) >= 12:
+ try:
+ # 简单的季节性检测:检查是否存在周期性模式
+ seasonal_acf = stattools.acf(data, nlags=min(12, len(data)//2))
+ seasonal_pattern = any(abs(x) > 0.3 for x in seasonal_acf[1:])
+ except:
+ seasonal_pattern = False
+
+ # 构建详细的结果文本
+ result_text = f"""📊 时间序列分析结果
+
+ 🔍 基本统计信息:
+ - 观测数量 = {basic_stats['count']}
+ - 均值 = {basic_stats['mean']:.4f}
+ - 标准差 = {basic_stats['std']:.4f}
+ - 方差 = {basic_stats['variance']:.4f}
+ - 最小值 = {basic_stats['min']:.4f}
+ - 最大值 = {basic_stats['max']:.4f}
+ - 极差 = {basic_stats['range']:.4f}
+ - 中位数 = {basic_stats['median']:.4f}
+ - 偏度 = {basic_stats['skewness']:.4f}
+ - 峰度 = {basic_stats['kurtosis']:.4f}
+ - 变异系数 = {basic_stats['cv']:.4f}
+
+ 📈 平稳性检验:
+ - ADF检验统计量 = {adf_result[0]:.4f}
+ - ADF检验p值 = {adf_result[1]:.4f}
+ - KPSS检验统计量 = {kpss_result[0]:.4f}
+ - KPSS检验p值 = {kpss_result[1]:.4f}
+ - 平稳性判断 = {'平稳' if adf_result[1] < 0.05 and kpss_result[1] > 0.05 else '非平稳'}
+
+ 🔬 自相关分析:
+ - ACF前5阶: {[f'{x:.4f}' for x in acf_values[:5]]}
+ - PACF前5阶: {[f'{x:.4f}' for x in pacf_values[:5]]}
+ - 最大自相关: {max(abs(acf_values[1:])) if len(acf_values) > 1 else 0:.4f}
+ - 最大偏自相关: {max(abs(pacf_values[1:])) if len(pacf_values) > 1 else 0:.4f}
+
+ 📊 诊断统计量:
+ - 趋势强度: {trend_strength:.4f}
+ - 季节性模式: {'存在' if seasonal_pattern else '未检测到'}
+ - 数据波动性: {'高' if basic_stats['cv'] > 0.5 else '中等' if basic_stats['cv'] > 0.2 else '低'}
+ - 分布形态: {'右偏' if basic_stats['skewness'] > 0.5 else '左偏' if basic_stats['skewness'] < -0.5 else '近似对称'}
+ - 峰度类型: {'尖峰' if basic_stats['kurtosis'] > 3 else '低峰' if basic_stats['kurtosis'] < 3 else '正态'}"""
+
+ # 详细的模型建议
+ result_text += f"\n\n💡 详细模型建议:"
+
+ if adf_result[1] < 0.05: # 平稳序列
+ result_text += f"\n- 数据为平稳序列,可直接建模"
+
+ # 根据ACF/PACF模式给出详细建议
+ acf_decay = abs(acf_values[1]) > 0.5
+ pacf_cutoff = abs(pacf_values[1]) > 0.5 and all(abs(x) < 0.3 for x in pacf_values[2:5])
+
+ if acf_decay and pacf_cutoff:
+ result_text += f"\n- ACF缓慢衰减,PACF在1阶截尾,建议尝试AR(1)模型"
+ result_text += f"\n- 可考虑ARMA(1,1)作为备选模型"
+ elif not acf_decay and pacf_cutoff:
+ result_text += f"\n- ACF快速衰减,PACF截尾,建议尝试MA模型"
+ elif acf_decay and not pacf_cutoff:
+ result_text += f"\n- ACF缓慢衰减,PACF无截尾,建议尝试AR模型"
+ else:
+ result_text += f"\n- ACF和PACF均缓慢衰减,建议尝试ARMA模型"
+
+ # 根据数据特征给出额外建议
+ if seasonal_pattern:
+ result_text += f"\n- 检测到季节性模式,可考虑SARIMA模型"
+ if trend_strength > 0.7:
+ result_text += f"\n- 强趋势模式,可考虑带趋势项的模型"
+
+ else: # 非平稳序列
+ result_text += f"\n- 数据为非平稳序列,建议进行差分处理"
+ result_text += f"\n- 可尝试ARIMA(p,d,q)模型,其中d为差分阶数"
+
+ # 根据趋势强度建议差分阶数
+ if trend_strength > 0.8:
+ result_text += f"\n- 强趋势,建议尝试1-2阶差分"
+ elif trend_strength > 0.5:
+ result_text += f"\n- 中等趋势,建议尝试1阶差分"
+ else:
+ result_text += f"\n- 弱趋势,可尝试1阶差分"
+
+ if seasonal_pattern:
+ result_text += f"\n- 检测到季节性模式,可考虑SARIMA模型"
+
+ # 根据数据长度给出建议
+ if len(data) < 30:
+ result_text += f"\n- 数据量较少({len(data)}个观测点),建议谨慎解释结果"
+ elif len(data) < 100:
+ result_text += f"\n- 数据量适中({len(data)}个观测点),适合大多数时间序列模型"
+ else:
+ result_text += f"\n- 数据量充足({len(data)}个观测点),可考虑复杂模型"
+
+ result_text += f"\n\n⚠️ 建模注意事项:"
+ result_text += f"\n- 平稳性是时间序列建模的重要前提"
+ result_text += f"\n- ACF和PACF模式有助于识别合适的模型阶数"
+ result_text += f"\n- 建议结合信息准则(AIC/BIC)进行模型选择"
+ result_text += f"\n- 模型诊断:检查残差的自相关性和正态性"
+ result_text += f"\n- 模型验证:使用样本外数据进行预测验证"
+ result_text += f"\n- 参数稳定性:确保模型参数在整个样本期内稳定"
+
  result_data = {
+ "basic_statistics": basic_stats,
  "adf_statistic": float(adf_result[0]),
  "adf_pvalue": float(adf_result[1]),
- "stationary": bool(adf_result[1] < 0.05),
+ "kpss_statistic": float(kpss_result[0]),
+ "kpss_pvalue": float(kpss_result[1]),
+ "stationary": bool(adf_result[1] < 0.05 and kpss_result[1] > 0.05),
  "acf": [float(x) for x in acf_values.tolist()],
- "pacf": [float(x) for x in pacf_values.tolist()]
+ "pacf": [float(x) for x in pacf_values.tolist()],
+ "diagnostic_stats": {
+ "trend_strength": trend_strength,
+ "seasonal_pattern": seasonal_pattern,
+ "volatility_level": "high" if basic_stats['cv'] > 0.5 else "medium" if basic_stats['cv'] > 0.2 else "low",
+ "distribution_shape": "right_skewed" if basic_stats['skewness'] > 0.5 else "left_skewed" if basic_stats['skewness'] < -0.5 else "symmetric",
+ "kurtosis_type": "leptokurtic" if basic_stats['kurtosis'] > 3 else "platykurtic" if basic_stats['kurtosis'] < 3 else "mesokurtic"
+ },
+ "model_suggestions": {
+ "is_stationary": adf_result[1] < 0.05,
+ "suggested_models": ["ARMA", "ARIMA"] if adf_result[1] < 0.05 else ["ARIMA", "SARIMA"],
+ "data_sufficiency": "low" if len(data) < 30 else "medium" if len(data) < 100 else "high",
+ "trend_recommendation": "strong_diff" if trend_strength > 0.8 else "moderate_diff" if trend_strength > 0.5 else "weak_diff",
+ "seasonal_recommendation": "consider_seasonal" if seasonal_pattern else "no_seasonal"
+ }
  }

  return CallToolResult(
- content=[
- TextContent(
- type="text",
- text=f"时间序列分析结果:\n"
- f"ADF检验统计量 = {result_data['adf_statistic']:.4f}\n"
- f"ADF检验p值 = {result_data['adf_pvalue']:.4f}\n"
- f"{'平稳' if result_data['stationary'] else '非平稳'}序列\n"
- f"ACF前5阶: {result_data['acf'][:5]}\n"
- f"PACF前5阶: {result_data['pacf'][:5]}"
- )
- ],
+ content=[TextContent(type="text", text=result_text)],
  structuredContent=result_data
  )
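For reference, the trend and seasonality heuristics added in this hunk, restated as a standalone sketch (the function name is illustrative): trend strength is the absolute correlation of the series with its time index, and seasonality is flagged when any autocorrelation beyond lag 0 exceeds 0.3.

    import numpy as np
    from statsmodels.tsa import stattools

    def trend_and_seasonality(data, acf_threshold: float = 0.3):
        """Heuristics mirroring the handler: |corr(t, y)| as trend strength,
        seasonality flagged if any ACF value past lag 0 exceeds the threshold."""
        trend_strength = abs(np.corrcoef(np.arange(len(data)), data)[0, 1]) if len(data) > 1 else 0.0
        seasonal = False
        if len(data) >= 12:
            acf_vals = stattools.acf(data, nlags=min(12, len(data) // 2))
            seasonal = bool(np.any(np.abs(acf_vals[1:]) > acf_threshold))
        return float(trend_strength), seasonal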
 
@@ -502,27 +636,32 @@ async def handle_state_space_model(ctx, data, state_dim=1, observation_dim=1,
  result_text = f"""📊 状态空间模型分析结果

  🔍 模型结构信息:
- - 状态维度 = {result.state_dim}
- - 观测维度 = {result.observation_dim}
- - 趋势项 = {'包含' if result.trend else '不包含'}
- - 季节项 = {'包含' if result.seasonal else '不包含'}
- - 季节周期 = {result.period if result.seasonal else 'N/A'}
+ - 状态维度 = {state_dim}
+ - 观测维度 = {observation_dim}
+ - 趋势项 = {'包含' if trend else '不包含'}
+ - 季节项 = {'包含' if seasonal else '不包含'}
+ - 季节周期 = {period if seasonal else 'N/A'}
  - AIC = {result.aic:.2f}
- - BIC = {getattr(result, 'bic', 'N/A')}
+ - BIC = {result.bic:.2f}
+ - 对数似然值 = {result.log_likelihood:.2f}

- 📈 模型拟合信息:"""
-
- # 添加模型拟合信息
- if hasattr(result, 'log_likelihood'):
- result_text += f"\n- 对数似然值: {result.log_likelihood:.2f}"
- if hasattr(result, 'converged'):
- result_text += f"\n- 收敛状态: {'已收敛' if result.converged else '未收敛'}"
- if hasattr(result, 'smoothing_error'):
- result_text += f"\n- 平滑误差: {result.smoothing_error:.4f}"
-
- result_text += f"\n\n💡 模型说明:状态空间模型用于分析时间序列的潜在状态和观测关系,能够处理复杂的动态系统。"
- result_text += f"\n\n⚠️ 注意事项:状态空间模型适用于分析具有潜在状态的时间序列,参数估计可能对初始值敏感。"
+ 📈 状态分析:"""
+
+ # 添加状态信息
+ if result.state_names:
+ result_text += f"\n- 状态变量: {', '.join(result.state_names)}"
+ if result.observation_names:
+ result_text += f"\n- 观测变量: {', '.join(result.observation_names)}"

+ # 添加状态估计信息
+ if result.filtered_state:
+ result_text += f"\n- 滤波状态估计: 已计算"
+ if result.smoothed_state:
+ result_text += f"\n- 平滑状态估计: 已计算"
+
+ result_text += f"\n\n💡 模型说明:状态空间模型用于分析时间序列的潜在状态和观测关系,能够处理复杂的动态系统,特别适用于具有不可观测状态的时间序列建模。"
+ result_text += f"\n\n⚠️ 注意事项:状态空间模型参数估计可能对初始值敏感,建议进行多次初始化尝试以获得稳定结果。"
+
  return CallToolResult(
  content=[TextContent(type="text", text=result_text)],
  structuredContent=result.model_dump()
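The handler above reads AIC/BIC, the log-likelihood and the filtered/smoothed states from this package's own result object (state_names, filtered_state, model_dump and so on are package-specific). For reference, a minimal local-level sketch with statsmodels' UnobservedComponents, under the assumption that the package wraps a similar estimator, shows where those quantities typically come from:

    from typing import Optional
    import numpy as np
    from statsmodels.tsa.statespace.structural import UnobservedComponents

    def local_level_summary(data, seasonal_period: Optional[int] = None) -> dict:
        """Fit a local-level (optionally seasonal) unobserved-components model
        and collect the fit statistics the handler reports."""
        model = UnobservedComponents(
            np.asarray(data, dtype=float),
            level="local level",
            seasonal=seasonal_period,  # None disables the seasonal component
        )
        result = model.fit(disp=False)
        return {
            "aic": float(result.aic),
            "bic": float(result.bic),
            "log_likelihood": float(result.llf),
            # smoothed_state has shape (k_states, nobs); row 0 is the level
            "smoothed_level": result.smoothed_state[0].tolist(),
        }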
@@ -539,25 +678,35 @@ async def handle_variance_decomposition(ctx, data, periods=10, max_lags=5, **kwa
  🔍 分析设置:
  - 分解期数 = {periods}
  - 最大滞后阶数 = {max_lags}
- - 变量数量 = {len(result) if isinstance(result, dict) else '未知'}
+ - 变量数量 = {len(data) if data else '未知'}

  📈 方差分解结果:"""
-
+
  # 添加方差分解结果
- if isinstance(result, dict):
- for var_name, decomposition in result.items():
+ if isinstance(result, dict) and "variance_decomposition" in result:
+ variance_decomp = result["variance_decomposition"]
+ horizon = result.get("horizon", periods)
+
+ result_text += f"\n- 分析期数: {horizon}期"
+
+ for var_name, decomposition in variance_decomp.items():
+ result_text += f"\n\n🔬 变量 '{var_name}' 的方差来源:"
  if isinstance(decomposition, dict):
- result_text += f"\n\n🔬 变量 '{var_name}' 的方差来源:"
- for source, percentage in decomposition.items():
- result_text += f"\n- {source}: {percentage:.1f}%"
+ for source, percentages in decomposition.items():
+ if isinstance(percentages, list) and len(percentages) > 0:
+ # 显示最后一期的贡献度
+ final_percentage = percentages[-1] * 100 if isinstance(percentages[-1], (int, float)) else 0
+ result_text += f"\n- {source}: {final_percentage:.1f}%"
+ else:
+ result_text += f"\n- {source}: {percentages:.1f}%"
  else:
- result_text += f"\n- {var_name}: {decomposition}"
+ result_text += f"\n- 总方差: {decomposition:.1f}%"
  else:
- result_text += f"\n- 结果: {result}"
-
- result_text += f"\n\n💡 分析说明:方差分解用于分析多变量系统中各变量对预测误差方差的贡献程度。"
- result_text += f"\n\n⚠️ 注意事项:方差分解结果依赖于模型的滞后阶数选择,不同期数的分解结果可能不同。"
-
+ result_text += f"\n- 结果格式异常,无法解析方差分解结果"
+ result_text += f"\n\n💡 分析说明:方差分解用于分析多变量系统中各变量对预测误差方差的贡献程度,反映变量间的动态影响关系。"
+ result_text += f"\n\n⚠️ 注意事项:方差分解结果依赖于VAR模型的滞后阶数选择,不同期数的分解结果反映短期和长期影响。"
+
+
  return CallToolResult(
  content=[TextContent(type="text", text=result_text)],
  structuredContent=result
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: aigroup-econ-mcp
- Version: 0.9.0
- Summary: 专业计量经济学MCP工具 - 让大模型直接进行数据分析(优化版:统一输出格式,增强模型说明)
+ Version: 1.0.1
+ Summary: 专业计量经济学MCP工具 - 让大模型直接进行数据分析(修复版:修复状态空间模型和方差分解分析错误,增强时间序列分析)
  Project-URL: Homepage, https://github.com/aigroup/aigroup-econ-mcp
  Project-URL: Repository, https://github.com/aigroup/aigroup-econ-mcp.git
  Project-URL: Issues, https://github.com/aigroup/aigroup-econ-mcp/issues
@@ -1,7 +1,7 @@
- aigroup_econ_mcp/__init__.py,sha256=VY0cBsd0vAi25gJRiXcoTbr8Vu43BLQvJ_oNjPXIBeY,511
+ aigroup_econ_mcp/__init__.py,sha256=Emzv62DUuziInXIYpCLshyqDmrqPZBsFBiFSrIiUiWU,511
  aigroup_econ_mcp/cli.py,sha256=7yeNXWNwMdpUswAO4LsqAvb0EmCO3S6Bs6sl483uSXI,3363
  aigroup_econ_mcp/config.py,sha256=ab5X4-H8isIe2nma0c0AOqlyYgwhf5kfe9Zx5XRrzIo,18876
- aigroup_econ_mcp/server.py,sha256=gWpjGDMsj0znYRh3Nq5mzyatPeO-iyQgxYzZXF34Fvs,30899
+ aigroup_econ_mcp/server.py,sha256=jEcEMhwiWCr_9QziqW4nvV2z8Eg807iT6do2C6mjpo8,30899
  aigroup_econ_mcp/tools/__init__.py,sha256=WYrsZX3Emv09c8QikvtG2BouUFZCguYkQ7eDjuwarAg,396
  aigroup_econ_mcp/tools/base.py,sha256=Mv_mcKVTIg9A2dsqBBiU74_Ai2nb5sn2S3U4CNOxLKw,15218
  aigroup_econ_mcp/tools/cache.py,sha256=Urv2zuycp5dS7Qh-XQWEMrwszq9RZ-il8cz_-WniGgc,15311
@@ -17,14 +17,14 @@ aigroup_econ_mcp/tools/optimized_example.py,sha256=tZVQ2jTzHY_zixTynm4Sq8gj5hz6e
  aigroup_econ_mcp/tools/panel_data.py,sha256=qFZICvt9Plt2bOvCCgAveVncb_QpHvWzDssdQntKf5M,22696
  aigroup_econ_mcp/tools/regression.py,sha256=uMGRGUQo4mU1sb8fwpP2FpkCqt_e9AtqEtUpInACtJo,6443
  aigroup_econ_mcp/tools/statistics.py,sha256=2cHgNSUXwPYPLxntVOEOL8yF-x92mrgjK-R8kkxDihg,4239
- aigroup_econ_mcp/tools/time_series.py,sha256=LNCO0bYXLPilQ2kSVXA3woNp8ERVq7n3jaoQhWgTCJQ,21763
+ aigroup_econ_mcp/tools/time_series.py,sha256=ZlkYn0HMm_jRn0LuxIEC4VegM3vNFPCOfdQXh5AayvA,22911
  aigroup_econ_mcp/tools/timeout.py,sha256=vNnGsR0sXW1xvIbKCF-qPUU3QNDAn_MaQgSxbGxkfW4,8404
  aigroup_econ_mcp/tools/tool_descriptions.py,sha256=Oj_14_79AB8Ku64mV0cdoV5f2-UFx-0NY3Xxjj6L-1A,32506
- aigroup_econ_mcp/tools/tool_handlers.py,sha256=RUXCB8dYkS2sbn7pKl3WPI70HQHwCDoy0hEmQMJ8rbs,34399
+ aigroup_econ_mcp/tools/tool_handlers.py,sha256=lPBSOeKPJLD1-u0nBZEXMbMg9j4Yp0DG3GtFWbjhiS0,41717
  aigroup_econ_mcp/tools/tool_registry.py,sha256=4SFpMnReZyGfEHCCDnojwHIUEpuQICS9M2u_9xuoUck,4413
  aigroup_econ_mcp/tools/validation.py,sha256=F7LHwog5xtFIMjD9D48kd8jAF5MsZb7wjdrgaOg8EKo,16657
- aigroup_econ_mcp-0.9.0.dist-info/METADATA,sha256=-0IWKugPYec7nEWq85UtkCwFaYhePOwrNP3P7Wq6tIo,10857
- aigroup_econ_mcp-0.9.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- aigroup_econ_mcp-0.9.0.dist-info/entry_points.txt,sha256=j5ZJYOc4lAZV-X3XkAuGhzHtIRcJtZ6Gz8ZKPY_QTrM,62
- aigroup_econ_mcp-0.9.0.dist-info/licenses/LICENSE,sha256=DoyCJUWlDzKbqc5KRbFpsGYLwLh-XJRHKQDoITjb1yc,1083
- aigroup_econ_mcp-0.9.0.dist-info/RECORD,,
+ aigroup_econ_mcp-1.0.1.dist-info/METADATA,sha256=kWS3Gb9uK7CJZboVmQG7acTtUoDEI3T2zBNoabqFSvk,10896
+ aigroup_econ_mcp-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ aigroup_econ_mcp-1.0.1.dist-info/entry_points.txt,sha256=j5ZJYOc4lAZV-X3XkAuGhzHtIRcJtZ6Gz8ZKPY_QTrM,62
+ aigroup_econ_mcp-1.0.1.dist-info/licenses/LICENSE,sha256=DoyCJUWlDzKbqc5KRbFpsGYLwLh-XJRHKQDoITjb1yc,1083
+ aigroup_econ_mcp-1.0.1.dist-info/RECORD,,