massgen 0.1.0a2__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
Potentially problematic release. This version of massgen might be problematic.
- massgen/__init__.py +1 -1
- massgen/agent_config.py +17 -0
- massgen/api_params_handler/_api_params_handler_base.py +1 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +8 -1
- massgen/api_params_handler/_claude_api_params_handler.py +8 -1
- massgen/api_params_handler/_gemini_api_params_handler.py +73 -0
- massgen/api_params_handler/_response_api_params_handler.py +8 -1
- massgen/backend/base.py +31 -0
- massgen/backend/{base_with_mcp.py → base_with_custom_tool_and_mcp.py} +282 -11
- massgen/backend/chat_completions.py +182 -92
- massgen/backend/claude.py +115 -18
- massgen/backend/claude_code.py +378 -14
- massgen/backend/docs/CLAUDE_API_RESEARCH.md +3 -3
- massgen/backend/gemini.py +1275 -1607
- massgen/backend/gemini_mcp_manager.py +545 -0
- massgen/backend/gemini_trackers.py +344 -0
- massgen/backend/gemini_utils.py +43 -0
- massgen/backend/response.py +129 -70
- massgen/cli.py +643 -132
- massgen/config_builder.py +381 -32
- massgen/configs/README.md +111 -80
- massgen/configs/basic/multi/three_agents_default.yaml +1 -1
- massgen/configs/basic/single/single_agent.yaml +1 -1
- massgen/configs/providers/openai/gpt5_nano.yaml +3 -3
- massgen/configs/tools/custom_tools/claude_code_custom_tool_example.yaml +32 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_example_no_path.yaml +28 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +40 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_wrong_mcp_example.yaml +38 -0
- massgen/configs/tools/custom_tools/claude_code_wrong_custom_tool_with_mcp_example.yaml +38 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/claude_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gemini_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/github_issue_market_analysis.yaml +94 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gpt5_nano_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_example.yaml +25 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_example_no_path.yaml +23 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_wrong_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/gpt_oss_wrong_custom_tool_with_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/grok3_mini_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_example.yaml +25 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_example_no_path.yaml +23 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +36 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_wrong_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/qwen_api_wrong_custom_tool_with_mcp_example.yaml +34 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_example.yaml +24 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_example_no_path.yaml +22 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_with_mcp_example.yaml +35 -0
- massgen/configs/tools/custom_tools/qwen_local_custom_tool_with_wrong_mcp_example.yaml +33 -0
- massgen/configs/tools/custom_tools/qwen_local_wrong_custom_tool_with_mcp_example.yaml +33 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +1 -1
- massgen/configs/voting/gemini_gpt_voting_sensitivity.yaml +67 -0
- massgen/formatter/_chat_completions_formatter.py +104 -0
- massgen/formatter/_claude_formatter.py +120 -0
- massgen/formatter/_gemini_formatter.py +448 -0
- massgen/formatter/_response_formatter.py +88 -0
- massgen/frontend/coordination_ui.py +4 -2
- massgen/logger_config.py +35 -3
- massgen/message_templates.py +56 -6
- massgen/orchestrator.py +179 -10
- massgen/stream_chunk/base.py +3 -0
- massgen/tests/custom_tools_example.py +392 -0
- massgen/tests/mcp_test_server.py +17 -7
- massgen/tests/test_config_builder.py +423 -0
- massgen/tests/test_custom_tools.py +401 -0
- massgen/tests/test_tools.py +127 -0
- massgen/tool/README.md +935 -0
- massgen/tool/__init__.py +39 -0
- massgen/tool/_async_helpers.py +70 -0
- massgen/tool/_basic/__init__.py +8 -0
- massgen/tool/_basic/_two_num_tool.py +24 -0
- massgen/tool/_code_executors/__init__.py +10 -0
- massgen/tool/_code_executors/_python_executor.py +74 -0
- massgen/tool/_code_executors/_shell_executor.py +61 -0
- massgen/tool/_exceptions.py +39 -0
- massgen/tool/_file_handlers/__init__.py +10 -0
- massgen/tool/_file_handlers/_file_operations.py +218 -0
- massgen/tool/_manager.py +634 -0
- massgen/tool/_registered_tool.py +88 -0
- massgen/tool/_result.py +66 -0
- massgen/tool/_self_evolution/_github_issue_analyzer.py +369 -0
- massgen/tool/docs/builtin_tools.md +681 -0
- massgen/tool/docs/exceptions.md +794 -0
- massgen/tool/docs/execution_results.md +691 -0
- massgen/tool/docs/manager.md +887 -0
- massgen/tool/docs/workflow_toolkits.md +529 -0
- massgen/tool/workflow_toolkits/__init__.py +57 -0
- massgen/tool/workflow_toolkits/base.py +55 -0
- massgen/tool/workflow_toolkits/new_answer.py +126 -0
- massgen/tool/workflow_toolkits/vote.py +167 -0
- {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/METADATA +89 -131
- {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/RECORD +111 -36
- {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/WHEEL +0 -0
- {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.0a2.dist-info → massgen-0.1.1.dist-info}/top_level.txt +0 -0
massgen/tool/_registered_tool.py
ADDED
@@ -0,0 +1,88 @@
```python
# -*- coding: utf-8 -*-
"""Registered tool entry data model."""

from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Literal, Optional, Type

from pydantic import BaseModel

from ._result import ExecutionResult


@dataclass
class RegisteredToolEntry:
    """Container for registered tool metadata and configuration."""

    tool_name: str
    """Identifier for the tool function."""

    category: str | Literal["default"]
    """Category this tool belongs to."""

    origin: Literal["function", "mcp_server", "function_group"]
    """Source type of the tool."""

    base_function: Callable
    """The underlying callable function."""

    schema_def: dict
    """JSON schema definition for the tool."""

    preset_params: dict[str, Any] = field(default_factory=dict)
    """Pre-configured parameters hidden from schema."""

    extension_model: Optional[Type[BaseModel]] = None
    """Optional model for extending the base schema."""

    mcp_server_id: Optional[str] = None
    """MCP server identifier if applicable."""

    post_processor: Optional[Callable[[dict, ExecutionResult], Optional[ExecutionResult]]] = None
    """Optional post-processing function for results."""

    @property
    def get_extended_schema(self) -> dict:
        """Generate the complete schema including extensions.

        Returns:
            Merged JSON schema with extensions applied
        """
        if self.extension_model is None:
            return self.schema_def

        # Generate extension schema
        ext_schema = self.extension_model.model_json_schema()
        combined_schema = deepcopy(self.schema_def)

        # Clean up title fields
        self._clean_titles(ext_schema)

        # Merge extension properties
        for prop_key, prop_val in ext_schema.get("properties", {}).items():
            existing_props = combined_schema["function"]["parameters"]["properties"]
            if prop_key in existing_props:
                raise ValueError(
                    f"Property '{prop_key}' conflicts with existing schema for '{self.tool_name}'",
                )

            existing_props[prop_key] = prop_val

            # Add to required list if necessary
            if prop_key in ext_schema.get("required", []):
                if "required" not in combined_schema["function"]["parameters"]:
                    combined_schema["function"]["parameters"]["required"] = []
                combined_schema["function"]["parameters"]["required"].append(prop_key)

        return combined_schema

    @staticmethod
    def _clean_titles(schema_obj: Any) -> None:
        """Recursively remove title fields from schema."""
        if isinstance(schema_obj, dict):
            schema_obj.pop("title", None)
            for val in schema_obj.values():
                RegisteredToolEntry._clean_titles(val)
        elif isinstance(schema_obj, list):
            for element in schema_obj:
                RegisteredToolEntry._clean_titles(element)
```
massgen/tool/_result.py
ADDED
@@ -0,0 +1,66 @@
```python
# -*- coding: utf-8 -*-
"""Execution result class for tool outputs."""

from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional, Union


def _generate_id() -> str:
    """Generate a unique identifier with timestamp."""
    return datetime.now().strftime("%Y%m%d_%H%M%S_%f")


@dataclass
class ContentBlock:
    """Base class for content blocks."""

    block_type: str
    data: str


@dataclass
class TextContent(ContentBlock):
    """Text content block."""

    def __init__(self, data: str):
        super().__init__(block_type="text", data=data)


@dataclass
class ImageContent(ContentBlock):
    """Image content block."""

    def __init__(self, data: str):
        super().__init__(block_type="image", data=data)


@dataclass
class AudioContent(ContentBlock):
    """Audio content block."""

    def __init__(self, data: str):
        super().__init__(block_type="audio", data=data)


@dataclass
class ExecutionResult:
    """Result container for tool execution outputs."""

    output_blocks: List[Union[TextContent, ImageContent, AudioContent]]
    """The execution output blocks from the tool."""

    meta_info: Optional[dict] = None
    """Additional metadata accessible within the system."""

    is_streaming: bool = False
    """Indicates if the output is being streamed."""

    is_final: bool = True
    """Indicates if this is the final result in a stream."""

    was_interrupted: bool = False
    """Indicates if the execution was interrupted."""

    result_id: str = field(default_factory=_generate_id)
    """Unique identifier for this result."""
```
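As a quick illustration of the streaming contract these flags encode, a tool can emit interim results with `is_final=False` and close the stream with `is_final=True`. The sketch below is illustrative only and not shipped with the package; the `elapsed_s` metadata key is hypothetical.

```python
# Illustrative sketch: how a streaming tool might construct ExecutionResult values.
from massgen.tool._result import ExecutionResult, TextContent


def make_progress(msg: str) -> ExecutionResult:
    # Interim chunk: the stream is open and more results will follow.
    return ExecutionResult(
        output_blocks=[TextContent(data=msg)],
        is_streaming=True,
        is_final=False,
    )


final = ExecutionResult(
    output_blocks=[TextContent(data="done")],
    meta_info={"elapsed_s": 1.2},  # hypothetical metadata key
    is_streaming=True,
    is_final=True,  # closes the stream
)
print(final.result_id)  # timestamp-based id, e.g. "20250101_120000_000000"
```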
massgen/tool/_self_evolution/_github_issue_analyzer.py
ADDED
@@ -0,0 +1,369 @@
```python
# -*- coding: utf-8 -*-
"""
GitHub Issue Analyzer - Self-Evolution Tool for Market Analysis

This tool enables MassGen agents to analyze GitHub issues and pull requests
to understand user needs, prioritize features, and drive market-driven development.

This demonstrates Self-Evolution: Market Analysis capabilities where
agents can autonomously understand what users need and identify next features.
"""

from typing import AsyncGenerator, List, Optional

import aiohttp

from massgen.tool._result import ExecutionResult, TextContent


async def fetch_github_issues(
    repo: str,
    state: str = "open",
    labels: Optional[List[str]] = None,
    limit: int = 50,
) -> AsyncGenerator[ExecutionResult, None]:
    """Fetch and analyze GitHub issues for a repository.

    This tool fetches issues from a GitHub repository and provides structured
    analysis to help understand user needs, feature requests, and pain points.
    Useful for market-driven development and feature prioritization.

    Note: GitHub API returns both issues and pull requests from the /issues endpoint.
    This tool automatically filters out PRs and returns only actual issues.

    Args:
        repo: Repository in format "owner/repo" (e.g., "Leezekun/MassGen")
        state: Issue state - "open", "closed", or "all" (default: "open")
        labels: Optional list of label names to filter by (e.g., ["enhancement", "bug"])
        limit: Maximum number of issues to analyze (default: 50, max: 100)

    Returns:
        ExecutionResult with issue analysis including titles, descriptions,
        labels, engagement metrics, and categorization

    Example:
        >>> async for result in fetch_github_issues(
        ...     repo="Leezekun/MassGen",
        ...     state="open",
        ...     labels=["enhancement"],
        ...     limit=10
        ... ):
        ...     print(result.output_blocks[0].data)
    """
    # Validate inputs
    if limit > 100:
        limit = 100
    if state not in ["open", "closed", "all"]:
        state = "open"

    # Initial status
    yield ExecutionResult(
        output_blocks=[
            TextContent(
                data=f"🔍 Fetching {state} issues from {repo} (limit: {limit})...",
            ),
        ],
        is_streaming=True,
        is_final=False,
    )

    try:
        # GitHub API endpoint
        api_url = f"https://api.github.com/repos/{repo}/issues"

        # Fetch more items than requested to account for PR filtering
        # GitHub API returns both issues and PRs, so we need extra buffer
        fetch_limit = min(limit * 2, 100)  # Fetch 2x requested, max 100

        # Build query parameters
        params = {
            "state": state,
            "per_page": fetch_limit,
            "sort": "created",
            "direction": "desc",
        }

        if labels:
            params["labels"] = ",".join(labels)

        # Fetch issues from GitHub API
        async with aiohttp.ClientSession() as session:
            async with session.get(api_url, params=params) as response:
                if response.status != 200:
                    error_text = await response.text()
                    yield ExecutionResult(
                        output_blocks=[
                            TextContent(
                                data=f"❌ Error fetching issues: HTTP {response.status}\n{error_text}",
                            ),
                        ],
                        is_streaming=True,
                        is_final=True,
                    )
                    return

                issues_data = await response.json()

        # Filter out pull requests (GitHub API returns PRs as issues)
        total_fetched = len(issues_data)
        issues = [issue for issue in issues_data if "pull_request" not in issue]
        prs_filtered = total_fetched - len(issues)

        # Limit to requested number of issues
        issues = issues[:limit]

        if not issues:
            yield ExecutionResult(
                output_blocks=[
                    TextContent(
                        data=f"ℹ️ No issues found matching criteria (repo: {repo}, state: {state}, labels: {labels})\n" f"Fetched {total_fetched} items, filtered out {prs_filtered} pull requests.",
                    ),
                ],
                is_streaming=True,
                is_final=True,
            )
            return

        # Progress update with PR filtering info
        filter_msg = f" (filtered out {prs_filtered} PRs)" if prs_filtered > 0 else ""
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"📊 Analyzing {len(issues)} issues{filter_msg}..."),
            ],
            is_streaming=True,
            is_final=False,
        )

        # Analyze issues
        analysis = _analyze_issues(issues, repo)

        # Format final result
        result_text = _format_analysis(analysis, repo, state, labels)

        yield ExecutionResult(
            output_blocks=[TextContent(data=result_text)],
            meta_info={
                "total_issues": len(issues),
                "repo": repo,
                "state": state,
                "labels": labels or [],
                "categories": list(analysis["by_category"].keys()),
            },
            is_streaming=True,
            is_final=True,
        )

    except aiohttp.ClientError as e:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"❌ Network error fetching issues: {str(e)}"),
            ],
            is_streaming=True,
            is_final=True,
        )
    except Exception as e:
        yield ExecutionResult(
            output_blocks=[
                TextContent(data=f"❌ Error analyzing issues: {str(e)}"),
            ],
            is_streaming=True,
            is_final=True,
        )


def _analyze_issues(issues: List[dict], repo: str) -> dict:
    """Analyze issues and extract insights."""
    analysis = {
        "total": len(issues),
        "by_category": {},
        "by_label": {},
        "top_engaged": [],
        "recent": [],
        "all_issues": [],
    }

    # Categorize issues
    for issue in issues:
        # Extract data
        title = issue.get("title", "")
        number = issue.get("number", 0)
        labels = [label["name"] for label in issue.get("labels", [])]
        comments = issue.get("comments", 0)
        reactions = issue.get("reactions", {}).get("total_count", 0)
        created_at = issue.get("created_at", "")
        body = issue.get("body", "")[:500]  # Truncate long descriptions
        url = issue.get("html_url", "")

        # Calculate engagement score
        engagement = comments + (reactions * 2)

        # Categorize by keywords
        category = _categorize_issue(title, labels, body)
        if category not in analysis["by_category"]:
            analysis["by_category"][category] = []
        analysis["by_category"][category].append(
            {"number": number, "title": title, "engagement": engagement},
        )

        # Count by label
        for label in labels:
            if label not in analysis["by_label"]:
                analysis["by_label"][label] = 0
            analysis["by_label"][label] += 1

        # Store issue data
        issue_summary = {
            "number": number,
            "title": title,
            "labels": labels,
            "comments": comments,
            "reactions": reactions,
            "engagement": engagement,
            "created_at": created_at,
            "category": category,
            "url": url,
            "body_preview": body,
        }

        analysis["all_issues"].append(issue_summary)

    # Get top engaged issues
    analysis["top_engaged"] = sorted(
        analysis["all_issues"],
        key=lambda x: x["engagement"],
        reverse=True,
    )[:5]

    # Get most recent issues
    analysis["recent"] = sorted(
        analysis["all_issues"],
        key=lambda x: x["created_at"],
        reverse=True,
    )[:5]

    return analysis


def _categorize_issue(title: str, labels: List[str], body: str) -> str:
    """Categorize an issue based on title, labels, and body."""
    title_lower = title.lower()
    body_lower = body.lower()
    labels_lower = [label.lower() for label in labels]

    # Check labels first (most reliable)
    if "bug" in labels_lower:
        return "Bug Fix"
    if "enhancement" in labels_lower or "feature" in labels_lower:
        return "Feature Request"
    if "documentation" in labels_lower or "docs" in labels_lower:
        return "Documentation"
    if "performance" in labels_lower:
        return "Performance"
    if "question" in labels_lower or "help wanted" in labels_lower:
        return "Question/Support"

    # Check title and body
    if any(word in title_lower or word in body_lower for word in ["add", "support", "implement", "new feature", "feature request"]):
        return "Feature Request"
    if any(word in title_lower or word in body_lower for word in ["bug", "error", "crash", "broken", "fix"]):
        return "Bug Fix"
    if any(word in title_lower or word in body_lower for word in ["doc", "readme"]):
        return "Documentation"
    if any(word in title_lower or word in body_lower for word in ["slow", "performance", "optimize"]):
        return "Performance"

    return "Other"


def _format_analysis(
    analysis: dict,
    repo: str,
    state: str,
    labels: Optional[List[str]],
) -> str:
    """Format the analysis into a readable report."""
    lines = []

    # Header
    lines.append(f"# GitHub Issues Analysis: {repo}")
    lines.append(f"**State**: {state}")
    if labels:
        lines.append(f"**Filters**: {', '.join(labels)}")
    lines.append(f"**Total Issues Analyzed**: {analysis['total']}")
    lines.append("")

    # Category breakdown
    lines.append("## Issues by Category")
    for category, issues in sorted(
        analysis["by_category"].items(),
        key=lambda x: len(x[1]),
        reverse=True,
    ):
        lines.append(f"- **{category}**: {len(issues)} issues")
    lines.append("")

    # Label breakdown
    if analysis["by_label"]:
        lines.append("## Most Common Labels")
        for label, count in sorted(
            analysis["by_label"].items(),
            key=lambda x: x[1],
            reverse=True,
        )[:10]:
            lines.append(f"- `{label}`: {count} issues")
        lines.append("")

    # Top engaged issues
    lines.append("## Top 5 Most Engaged Issues")
    lines.append("*(Based on comments + reactions)*")
    lines.append("")
    for i, issue in enumerate(analysis["top_engaged"], 1):
        lines.append(
            f"{i}. **#{issue['number']}**: {issue['title']} " f"({issue['comments']} comments, {issue['reactions']} reactions) " f"[{issue['category']}]",
        )
    lines.append("")

    # Recent issues
    lines.append("## 5 Most Recent Issues")
    lines.append("")
    for i, issue in enumerate(analysis["recent"], 1):
        labels_str = ", ".join(f"`{label}`" for label in issue["labels"][:3])
        if len(issue["labels"]) > 3:
            labels_str += f" +{len(issue['labels']) - 3} more"
        lines.append(
            f"{i}. **#{issue['number']}**: {issue['title']} " f"[{issue['category']}] {labels_str}",
        )
    lines.append("")

    # Recommendations
    lines.append("## 💡 Insights & Recommendations")
    lines.append("")

    # Most requested category
    if analysis["by_category"]:
        top_category = max(analysis["by_category"].items(), key=lambda x: len(x[1]))
        lines.append(
            f"- **Primary User Need**: {top_category[0]} ({len(top_category[1])} requests)",
        )

    # High engagement issues
    high_engagement = [i for i in analysis["all_issues"] if i["engagement"] > 5]
    if high_engagement:
        lines.append(
            f"- **High Engagement**: {len(high_engagement)} issues with 5+ comments/reactions",
        )

    # Feature vs bug ratio
    features = len(analysis["by_category"].get("Feature Request", []))
    bugs = len(analysis["by_category"].get("Bug Fix", []))
    if features > 0 or bugs > 0:
        lines.append(
            f"- **Feature vs Bug Ratio**: {features} features / {bugs} bugs",
        )

    lines.append("")
    lines.append(
        "**Next Steps**: Review high-engagement issues and top category for prioritization.",
    )

    return "\n".join(lines)
```
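To close, a minimal driver sketch for the analyzer as defined above; it is not shipped with the package. The repo string comes from the docstring example, and unauthenticated requests are subject to GitHub's rate limits.

```python
# Minimal driver sketch for fetch_github_issues (illustrative only).
import asyncio

from massgen.tool._self_evolution._github_issue_analyzer import fetch_github_issues


async def main() -> None:
    # Consume the stream: interim progress chunks arrive first, then the final report.
    async for result in fetch_github_issues(repo="Leezekun/MassGen", limit=10):
        print(result.output_blocks[0].data)
        # meta_info is only populated on the successful final result.
        if result.is_final and result.meta_info:
            print("categories:", result.meta_info["categories"])


asyncio.run(main())
```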