ragnarbot-ai 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragnarbot/__init__.py +6 -0
- ragnarbot/__main__.py +8 -0
- ragnarbot/agent/__init__.py +8 -0
- ragnarbot/agent/context.py +223 -0
- ragnarbot/agent/loop.py +365 -0
- ragnarbot/agent/memory.py +109 -0
- ragnarbot/agent/skills.py +228 -0
- ragnarbot/agent/subagent.py +241 -0
- ragnarbot/agent/tools/__init__.py +6 -0
- ragnarbot/agent/tools/base.py +102 -0
- ragnarbot/agent/tools/cron.py +114 -0
- ragnarbot/agent/tools/filesystem.py +191 -0
- ragnarbot/agent/tools/message.py +86 -0
- ragnarbot/agent/tools/registry.py +73 -0
- ragnarbot/agent/tools/shell.py +141 -0
- ragnarbot/agent/tools/spawn.py +65 -0
- ragnarbot/agent/tools/web.py +163 -0
- ragnarbot/bus/__init__.py +6 -0
- ragnarbot/bus/events.py +37 -0
- ragnarbot/bus/queue.py +81 -0
- ragnarbot/channels/__init__.py +6 -0
- ragnarbot/channels/base.py +121 -0
- ragnarbot/channels/manager.py +129 -0
- ragnarbot/channels/telegram.py +302 -0
- ragnarbot/cli/__init__.py +1 -0
- ragnarbot/cli/commands.py +568 -0
- ragnarbot/config/__init__.py +6 -0
- ragnarbot/config/loader.py +95 -0
- ragnarbot/config/schema.py +114 -0
- ragnarbot/cron/__init__.py +6 -0
- ragnarbot/cron/service.py +346 -0
- ragnarbot/cron/types.py +59 -0
- ragnarbot/heartbeat/__init__.py +5 -0
- ragnarbot/heartbeat/service.py +130 -0
- ragnarbot/providers/__init__.py +6 -0
- ragnarbot/providers/base.py +69 -0
- ragnarbot/providers/litellm_provider.py +135 -0
- ragnarbot/providers/transcription.py +67 -0
- ragnarbot/session/__init__.py +5 -0
- ragnarbot/session/manager.py +202 -0
- ragnarbot/skills/README.md +24 -0
- ragnarbot/skills/cron/SKILL.md +40 -0
- ragnarbot/skills/github/SKILL.md +48 -0
- ragnarbot/skills/skill-creator/SKILL.md +371 -0
- ragnarbot/skills/summarize/SKILL.md +67 -0
- ragnarbot/skills/tmux/SKILL.md +121 -0
- ragnarbot/skills/tmux/scripts/find-sessions.sh +112 -0
- ragnarbot/skills/tmux/scripts/wait-for-text.sh +83 -0
- ragnarbot/skills/weather/SKILL.md +49 -0
- ragnarbot/utils/__init__.py +5 -0
- ragnarbot/utils/helpers.py +91 -0
- ragnarbot_ai-0.1.0.dist-info/METADATA +28 -0
- ragnarbot_ai-0.1.0.dist-info/RECORD +56 -0
- ragnarbot_ai-0.1.0.dist-info/WHEEL +4 -0
- ragnarbot_ai-0.1.0.dist-info/entry_points.txt +2 -0
- ragnarbot_ai-0.1.0.dist-info/licenses/LICENSE +22 -0
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""Base LLM provider interface."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from dataclasses import dataclass, field
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass
class ToolCallRequest:
    """A tool call request from the LLM.

    Carries one tool invocation extracted from a provider response;
    ``arguments`` holds the already-parsed argument mapping.
    """

    # Provider-assigned call id, echoed back when reporting the tool result.
    id: str
    # Name of the tool the model wants to invoke.
    name: str
    # Parsed keyword arguments for the tool invocation.
    arguments: dict[str, Any]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class LLMResponse:
    """Normalized response from an LLM provider.

    Holds the assistant text (if any), any requested tool calls, the
    finish reason reported by the provider, and token-usage counters.
    """

    content: str | None
    tool_calls: list[ToolCallRequest] = field(default_factory=list)
    finish_reason: str = "stop"
    usage: dict[str, int] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        """True when the model requested at least one tool call."""
        return bool(self.tool_calls)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class LLMProvider(ABC):
    """
    Abstract base class for LLM providers.

    Concrete subclasses translate this uniform interface into each
    vendor's API so the rest of the codebase stays provider-agnostic.
    """

    def __init__(self, api_key: str | None = None, api_base: str | None = None):
        # Both are optional; subclasses decide how credentials and custom
        # endpoints are applied.
        self.api_key = api_key
        self.api_base = api_base

    @abstractmethod
    async def chat(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 4096,
        temperature: float = 0.7,
    ) -> LLMResponse:
        """
        Send a chat completion request.

        Args:
            messages: List of message dicts with 'role' and 'content'.
            tools: Optional list of tool definitions.
            model: Model identifier (provider-specific).
            max_tokens: Maximum tokens in response.
            temperature: Sampling temperature.

        Returns:
            LLMResponse with content and/or tool calls.
        """

    @abstractmethod
    def get_default_model(self) -> str:
        """Return the default model identifier for this provider."""
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
"""LiteLLM provider implementation for multi-provider support."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
import litellm
|
|
7
|
+
from litellm import acompletion
|
|
8
|
+
|
|
9
|
+
from ragnarbot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class LiteLLMProvider(LLMProvider):
    """
    LLM provider using LiteLLM for multi-provider support.

    Supports Anthropic, OpenAI, and Gemini through a unified interface.
    """

    def __init__(
        self,
        api_key: str | None = None,
        api_base: str | None = None,
        default_model: str = "anthropic/claude-opus-4-5"
    ):
        super().__init__(api_key, api_base)
        self.default_model = default_model

        # Configure the provider API key LiteLLM expects. Matching is
        # case-insensitive so ids like "Anthropic/..." are still recognized
        # (previously only the gemini check lowercased the model id).
        if api_key:
            model_id = default_model.lower()
            if "anthropic" in model_id:
                os.environ.setdefault("ANTHROPIC_API_KEY", api_key)
            elif "openai" in model_id or "gpt" in model_id:
                os.environ.setdefault("OPENAI_API_KEY", api_key)
            elif "gemini" in model_id:
                os.environ.setdefault("GEMINI_API_KEY", api_key)

        if api_base:
            litellm.api_base = api_base

        # Disable LiteLLM logging noise
        litellm.suppress_debug_info = True

    async def chat(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        model: str | None = None,
        max_tokens: int = 4096,
        temperature: float = 0.7,
    ) -> LLMResponse:
        """
        Send a chat completion request via LiteLLM.

        Args:
            messages: List of message dicts with 'role' and 'content'.
            tools: Optional list of tool definitions in OpenAI format.
            model: Model identifier (e.g., 'anthropic/claude-sonnet-4-5').
            max_tokens: Maximum tokens in response.
            temperature: Sampling temperature.

        Returns:
            LLMResponse with content and/or tool calls. Transport/provider
            errors are not raised; they come back as an LLMResponse with
            finish_reason="error" so callers degrade gracefully.
        """
        model = model or self.default_model

        # For Gemini, ensure gemini/ prefix if not already present
        if "gemini" in model.lower() and not model.startswith("gemini/"):
            model = f"gemini/{model}"

        kwargs: dict[str, Any] = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }

        # Pass api_base directly for custom endpoints (vLLM, etc.)
        if self.api_base:
            kwargs["api_base"] = self.api_base

        if tools:
            kwargs["tools"] = tools
            kwargs["tool_choice"] = "auto"

        try:
            response = await acompletion(**kwargs)
            return self._parse_response(response)
        except Exception as e:
            # Return error as content for graceful handling
            return LLMResponse(
                content=f"Error calling LLM: {str(e)}",
                finish_reason="error",
            )

    def _parse_response(self, response: Any) -> LLMResponse:
        """Parse LiteLLM response into our standard format."""
        # Hoisted out of the per-tool-call loop (previously re-imported on
        # every iteration). Stdlib, so a function-local import is safe.
        import json

        choice = response.choices[0]
        message = choice.message

        tool_calls = []
        if hasattr(message, "tool_calls") and message.tool_calls:
            for tc in message.tool_calls:
                # Arguments may arrive as a JSON string; parse defensively
                # and preserve unparseable payloads instead of dropping them.
                args = tc.function.arguments
                if isinstance(args, str):
                    try:
                        args = json.loads(args)
                    except json.JSONDecodeError:
                        args = {"raw": args}

                tool_calls.append(ToolCallRequest(
                    id=tc.id,
                    name=tc.function.name,
                    arguments=args,
                ))

        usage = {}
        if hasattr(response, "usage") and response.usage:
            usage = {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens,
            }

        return LLMResponse(
            content=message.content,
            tool_calls=tool_calls,
            finish_reason=choice.finish_reason or "stop",
            usage=usage,
        )

    def get_default_model(self) -> str:
        """Get the default model."""
        return self.default_model
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
"""Standalone voice transcription tool using Groq Whisper."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
from loguru import logger
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class GroqTranscriptionProvider:
|
|
12
|
+
"""
|
|
13
|
+
Standalone voice transcription tool using Groq's Whisper API.
|
|
14
|
+
|
|
15
|
+
This is not an LLM provider — it provides speech-to-text transcription
|
|
16
|
+
for voice messages. Groq offers extremely fast transcription with a
|
|
17
|
+
generous free tier.
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
def __init__(self, api_key: str | None = None):
|
|
21
|
+
self.api_key = api_key or os.environ.get("GROQ_API_KEY")
|
|
22
|
+
self.api_url = "https://api.groq.com/openai/v1/audio/transcriptions"
|
|
23
|
+
|
|
24
|
+
async def transcribe(self, file_path: str | Path) -> str:
|
|
25
|
+
"""
|
|
26
|
+
Transcribe an audio file using Groq.
|
|
27
|
+
|
|
28
|
+
Args:
|
|
29
|
+
file_path: Path to the audio file.
|
|
30
|
+
|
|
31
|
+
Returns:
|
|
32
|
+
Transcribed text.
|
|
33
|
+
"""
|
|
34
|
+
if not self.api_key:
|
|
35
|
+
logger.warning("Groq API key not configured for transcription")
|
|
36
|
+
return ""
|
|
37
|
+
|
|
38
|
+
path = Path(file_path)
|
|
39
|
+
if not path.exists():
|
|
40
|
+
logger.error(f"Audio file not found: {file_path}")
|
|
41
|
+
return ""
|
|
42
|
+
|
|
43
|
+
try:
|
|
44
|
+
async with httpx.AsyncClient() as client:
|
|
45
|
+
with open(path, "rb") as f:
|
|
46
|
+
files = {
|
|
47
|
+
"file": (path.name, f),
|
|
48
|
+
"model": (None, "whisper-large-v3"),
|
|
49
|
+
}
|
|
50
|
+
headers = {
|
|
51
|
+
"Authorization": f"Bearer {self.api_key}",
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
response = await client.post(
|
|
55
|
+
self.api_url,
|
|
56
|
+
headers=headers,
|
|
57
|
+
files=files,
|
|
58
|
+
timeout=60.0
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
response.raise_for_status()
|
|
62
|
+
data = response.json()
|
|
63
|
+
return data.get("text", "")
|
|
64
|
+
|
|
65
|
+
except Exception as e:
|
|
66
|
+
logger.error(f"Groq transcription error: {e}")
|
|
67
|
+
return ""
|
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
"""Session management for conversation history."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from loguru import logger
|
|
10
|
+
|
|
11
|
+
from ragnarbot.utils.helpers import ensure_dir, safe_filename
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class Session:
    """
    A conversation session.

    Messages are kept in memory as dicts and persisted as JSONL by the
    SessionManager.
    """

    key: str  # channel:chat_id
    messages: list[dict[str, Any]] = field(default_factory=list)
    created_at: datetime = field(default_factory=datetime.now)
    updated_at: datetime = field(default_factory=datetime.now)
    metadata: dict[str, Any] = field(default_factory=dict)

    def add_message(self, role: str, content: str, **kwargs: Any) -> None:
        """Append one timestamped message and bump ``updated_at``."""
        self.messages.append({
            "role": role,
            "content": content,
            "timestamp": datetime.now().isoformat(),
            **kwargs,
        })
        self.updated_at = datetime.now()

    def get_history(self, max_messages: int = 50) -> list[dict[str, Any]]:
        """
        Get message history for LLM context.

        Args:
            max_messages: Maximum messages to return (most recent kept).

        Returns:
            List of {"role", "content"} dicts, extra fields stripped.
        """
        if len(self.messages) > max_messages:
            window = self.messages[-max_messages:]
        else:
            window = self.messages
        return [{"role": entry["role"], "content": entry["content"]} for entry in window]

    def clear(self) -> None:
        """Drop every message and bump ``updated_at``."""
        self.messages = []
        self.updated_at = datetime.now()
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class SessionManager:
    """
    Manages conversation sessions.

    Sessions are stored as JSONL files in the sessions directory: one
    metadata line followed by one line per message.
    """

    def __init__(self, workspace: Path):
        self.workspace = workspace
        # NOTE(review): sessions live under the user's home directory, not
        # under `workspace` — confirm that is intentional.
        self.sessions_dir = ensure_dir(Path.home() / ".ragnarbot" / "sessions")
        self._cache: dict[str, Session] = {}

    def _get_session_path(self, key: str) -> Path:
        """Get the file path for a session (lossy: ':' becomes '_')."""
        safe_key = safe_filename(key.replace(":", "_"))
        return self.sessions_dir / f"{safe_key}.jsonl"

    def get_or_create(self, key: str) -> Session:
        """
        Get an existing session or create a new one.

        Args:
            key: Session key (usually channel:chat_id).

        Returns:
            The session.
        """
        # Check cache
        if key in self._cache:
            return self._cache[key]

        # Try to load from disk
        session = self._load(key)
        if session is None:
            session = Session(key=key)

        self._cache[key] = session
        return session

    def _load(self, key: str) -> Session | None:
        """Load a session from disk; None if absent or unreadable."""
        path = self._get_session_path(key)

        if not path.exists():
            return None

        try:
            messages: list[dict[str, Any]] = []
            metadata: dict[str, Any] = {}
            created_at = None
            updated_at = None

            with open(path) as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue

                    data = json.loads(line)

                    if data.get("_type") == "metadata":
                        metadata = data.get("metadata", {})
                        created_at = datetime.fromisoformat(data["created_at"]) if data.get("created_at") else None
                        # Restore updated_at too (previously discarded), so
                        # recency ordering survives a restart.
                        updated_at = datetime.fromisoformat(data["updated_at"]) if data.get("updated_at") else None
                    else:
                        messages.append(data)

            return Session(
                key=key,
                messages=messages,
                created_at=created_at or datetime.now(),
                updated_at=updated_at or datetime.now(),
                metadata=metadata
            )
        except Exception as e:
            logger.warning(f"Failed to load session {key}: {e}")
            return None

    def save(self, session: Session) -> None:
        """Save a session to disk (metadata line first, then messages)."""
        path = self._get_session_path(session.key)

        with open(path, "w") as f:
            # Persist the exact key: the filename encoding is lossy
            # (":" and "_" collide), so list_sessions needs the original.
            metadata_line = {
                "_type": "metadata",
                "key": session.key,
                "created_at": session.created_at.isoformat(),
                "updated_at": session.updated_at.isoformat(),
                "metadata": session.metadata
            }
            f.write(json.dumps(metadata_line) + "\n")

            # Write messages
            for msg in session.messages:
                f.write(json.dumps(msg) + "\n")

        self._cache[session.key] = session

    def delete(self, key: str) -> bool:
        """
        Delete a session.

        Args:
            key: Session key.

        Returns:
            True if the file was deleted, False if not found.
        """
        # Remove from cache
        self._cache.pop(key, None)

        # Remove file
        path = self._get_session_path(key)
        if path.exists():
            path.unlink()
            return True
        return False

    def list_sessions(self) -> list[dict[str, Any]]:
        """
        List all sessions, most recently updated first.

        Returns:
            List of session info dicts (key, created_at, updated_at, path).
        """
        sessions = []

        for path in self.sessions_dir.glob("*.jsonl"):
            try:
                # Read just the metadata line
                with open(path) as f:
                    first_line = f.readline().strip()
                if not first_line:
                    continue
                data = json.loads(first_line)
                if data.get("_type") != "metadata":
                    continue
                sessions.append({
                    # Prefer the exact key written by save(); fall back to
                    # the lossy filename decoding for pre-existing files.
                    "key": data.get("key") or path.stem.replace("_", ":"),
                    "created_at": data.get("created_at"),
                    "updated_at": data.get("updated_at"),
                    "path": str(path)
                })
            except Exception:
                continue

        # `or ""` guards against a stored null updated_at, which would make
        # the sort compare None with str and raise TypeError.
        return sorted(sessions, key=lambda x: x.get("updated_at") or "", reverse=True)
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# ragnarbot Skills
|
|
2
|
+
|
|
3
|
+
This directory contains built-in skills that extend ragnarbot's capabilities.
|
|
4
|
+
|
|
5
|
+
## Skill Format
|
|
6
|
+
|
|
7
|
+
Each skill is a directory containing a `SKILL.md` file with:
|
|
8
|
+
- YAML frontmatter (name, description, metadata)
|
|
9
|
+
- Markdown instructions for the agent
|
|
10
|
+
|
|
11
|
+
## Attribution
|
|
12
|
+
|
|
13
|
+
These skills are adapted from [OpenClaw](https://github.com/openclaw/openclaw)'s skill system.
|
|
14
|
+
The skill format and metadata structure follow OpenClaw's conventions to maintain compatibility.
|
|
15
|
+
|
|
16
|
+
## Available Skills
|
|
17
|
+
|
|
18
|
+
| Skill | Description |
|-------|-------------|
| `cron` | Schedule reminders and recurring tasks |
| `github` | Interact with GitHub using the `gh` CLI |
| `weather` | Get weather info using wttr.in and Open-Meteo |
| `summarize` | Summarize URLs, files, and YouTube videos |
| `tmux` | Remote-control tmux sessions |
| `skill-creator` | Create new skills |
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: cron
|
|
3
|
+
description: Schedule reminders and recurring tasks.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Cron
|
|
7
|
+
|
|
8
|
+
Use the `cron` tool to schedule reminders or recurring tasks.
|
|
9
|
+
|
|
10
|
+
## Two Modes
|
|
11
|
+
|
|
12
|
+
1. **Reminder** - message is sent directly to user
|
|
13
|
+
2. **Task** - message is a task description, agent executes and sends result
|
|
14
|
+
|
|
15
|
+
## Examples
|
|
16
|
+
|
|
17
|
+
Fixed reminder:
|
|
18
|
+
```
|
|
19
|
+
cron(action="add", message="Time to take a break!", every_seconds=1200)
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
Dynamic task (agent executes each time):
|
|
23
|
+
```
|
|
24
|
+
cron(action="add", message="Check HKUDS/ragnarbot GitHub stars and report", every_seconds=600)
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
List/remove:
|
|
28
|
+
```
|
|
29
|
+
cron(action="list")
|
|
30
|
+
cron(action="remove", job_id="abc123")
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Time Expressions
|
|
34
|
+
|
|
35
|
+
| User says | Parameters |
|
|
36
|
+
|-----------|------------|
|
|
37
|
+
| every 20 minutes | every_seconds: 1200 |
|
|
38
|
+
| every hour | every_seconds: 3600 |
|
|
39
|
+
| every day at 8am | cron_expr: "0 8 * * *" |
|
|
40
|
+
| weekdays at 5pm | cron_expr: "0 17 * * 1-5" |
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: github
|
|
3
|
+
description: "Interact with GitHub using the `gh` CLI. Use `gh issue`, `gh pr`, `gh run`, and `gh api` for issues, PRs, CI runs, and advanced queries."
|
|
4
|
+
metadata: {"ragnarbot":{"emoji":"🐙","requires":{"bins":["gh"]},"install":[{"id":"brew","kind":"brew","formula":"gh","bins":["gh"],"label":"Install GitHub CLI (brew)"},{"id":"apt","kind":"apt","package":"gh","bins":["gh"],"label":"Install GitHub CLI (apt)"}]}}
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# GitHub Skill
|
|
8
|
+
|
|
9
|
+
Use the `gh` CLI to interact with GitHub. Always specify `--repo owner/repo` when not in a git directory, or use URLs directly.
|
|
10
|
+
|
|
11
|
+
## Pull Requests
|
|
12
|
+
|
|
13
|
+
Check CI status on a PR:
|
|
14
|
+
```bash
|
|
15
|
+
gh pr checks 55 --repo owner/repo
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
List recent workflow runs:
|
|
19
|
+
```bash
|
|
20
|
+
gh run list --repo owner/repo --limit 10
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
View a run and see which steps failed:
|
|
24
|
+
```bash
|
|
25
|
+
gh run view <run-id> --repo owner/repo
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
View logs for failed steps only:
|
|
29
|
+
```bash
|
|
30
|
+
gh run view <run-id> --repo owner/repo --log-failed
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## API for Advanced Queries
|
|
34
|
+
|
|
35
|
+
The `gh api` command is useful for accessing data not available through other subcommands.
|
|
36
|
+
|
|
37
|
+
Get PR with specific fields:
|
|
38
|
+
```bash
|
|
39
|
+
gh api repos/owner/repo/pulls/55 --jq '.title, .state, .user.login'
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
## JSON Output
|
|
43
|
+
|
|
44
|
+
Most commands support `--json` for structured output. You can use `--jq` to filter:
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
gh issue list --repo owner/repo --json number,title --jq '.[] | "\(.number): \(.title)"'
|
|
48
|
+
```
|