intentkit 0.6.7.dev7__py3-none-any.whl → 0.6.7.dev9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of intentkit might be problematic.

@@ -18,22 +18,6 @@
   "states": {
     "type": "object",
     "properties": {
-      "get_mentions": {
-        "type": "string",
-        "title": "Get Mentions",
-        "enum": [
-          "disabled",
-          "public",
-          "private"
-        ],
-        "x-enum-title": [
-          "Disabled",
-          "Agent Owner + All Users",
-          "Agent Owner Only"
-        ],
-        "description": "This tool uses the Elfa AI API to query hourly-updated tweets from \"smart accounts\" \u2013 accounts identified as influential or relevant \u2013 that have received at least 10 interactions (comments, retweets, quote tweets).",
-        "default": "disabled"
-      },
       "get_top_mentions": {
         "type": "string",
         "title": "Get Top Mentions",
@@ -63,7 +47,7 @@
           "Agent Owner + All Users",
           "Agent Owner Only"
         ],
-        "description": "This tool uses the Elfa API to search tweets mentioning up to five keywords. It can search within the past 30 days of data, which is updated every 5 minutes, or access up to six months of historical tweet data.",
+        "description": "This tool uses the Elfa API to search tweets mentioning up to five keywords or from specific accounts. It can search within the past 30 days of data, which is updated every 5 minutes, or access up to six months of historical tweet data.",
         "default": "private"
       },
       "get_trending_tokens": {
@@ -1,118 +1,85 @@
-from typing import Type
+"""Smart stats skill for Elfa AI API."""
+
+from typing import Any, Dict, Optional, Type
 
-import httpx
-from langchain.tools.base import ToolException
 from langchain_core.runnables import RunnableConfig
 from pydantic import BaseModel, Field
 
-from .base import ElfaBaseTool, base_url
+from .base import ElfaBaseTool
+from .utils import SmartStatsData, make_elfa_request
 
 
 class ElfaGetSmartStatsInput(BaseModel):
-    username: str = Field(description="username to get stats for")
-
+    """Input parameters for smart stats."""
 
-class SmartStatsData(BaseModel):
-    followerEngagementRatio: float | None = Field(
-        description="the ratio of engagement by followers"
-    )
-    averageEngagement: float | None = Field(
-        description="the average engagement of acount"
-    )
-    smartFollowingCount: float | None = Field(
-        description="the count of smart followings"
-    )
+    username: str = Field(description="Account username to get stats for")
 
 
 class ElfaGetSmartStatsOutput(BaseModel):
+    """Output structure for smart stats response."""
+
     success: bool
-    data: SmartStatsData | None = Field(None, description="The stats data")
+    data: Optional[SmartStatsData] = Field(None, description="Smart stats data")
+    metadata: Optional[Dict[str, Any]] = Field(None, description="Response metadata")
 
 
 class ElfaGetSmartStats(ElfaBaseTool):
     """
-    This tool uses the Elfa API to retrieve key social media metrics for a given username. These metrics include:
-
-    * **Smart Following Count:** A metric representing the number of high-quality or influential followers.
-    * **Engagement Score:** A composite score reflecting the level of interaction with the user's content.
-    * **Engagement Ratio:** The ratio of engagement (likes, comments, shares) to the number of followers.
-
-    This tool is useful for:
-
-    * **Competitor Analysis:** Compare your social media performance to that of your competitors.
-    * **Influencer Identification:** Identify influential users in your niche.
-    * **Social Media Audits:** Assess the overall health and effectiveness of a social media presence.
-
-    To use this tool, simply provide the desired username. The tool will return the requested metrics.
-
-    Attributes:
-        name (str): Name of the tool, specifically "elfa_get_smart_stats".
-        description (str): Comprehensive description of the tool's purpose and functionality.
-        args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters.
+    Get smart stats for a specific username.
+
+    This tool uses the Elfa API to retrieve key social media metrics for a given username including:
+    - Smart Following Count: Number of high-quality or influential followers
+    - Average Engagement: Composite score reflecting interaction with user's content
+    - Average Reach: Average reach of the user's content
+    - Smart Follower Count: Number of smart followers
+    - Follower Count: Total follower count
+
+    Use Cases:
+    - Competitor analysis: Compare social media performance to competitors
+    - Influencer identification: Identify influential users in your niche
+    - Social media audits: Assess overall health and effectiveness of social media presence
     """
 
     name: str = "elfa_get_smart_stats"
-    description: str = """This tool uses the Elfa API to retrieve key social media metrics for a given username. These metrics include:
-
-    * **Smart Following Count:** A metric representing the number of high-quality or influential followers.
-    * **Engagement Score:** A composite score reflecting the level of interaction with the user's content.
-    * **Engagement Ratio:** The ratio of engagement (likes, comments, shares) to the number of followers.
-
-    This tool is useful for:
-
-    * **Competitor Analysis:** Compare your social media performance to that of your competitors.
-    * **Influencer Identification:** Identify influential users in your niche.
-    * **Social Media Audits:** Assess the overall health and effectiveness of a social media presence.
-    """
+    description: str = """Get comprehensive social media metrics for a username including smart following count,
+    engagement scores, and follower analytics. Use this for competitor analysis, influencer identification,
+    and social media performance audits."""
    args_schema: Type[BaseModel] = ElfaGetSmartStatsInput
 
     async def _arun(
-        self, username: str, config: RunnableConfig, **kwargs
+        self, username: str, config: RunnableConfig = None, **kwargs
     ) -> ElfaGetSmartStatsOutput:
-        """Run the tool retrieve smart stats (smart following count) and social metrics (engagement score and ratio) for a given username.
+        """
+        Execute the smart stats request.
 
         Args:
-            username (str): The username to check stats for.
-            config: The configuration for the runnable, containing agent context.
-            **kwargs: Additional parameters.
+            username: The username to check stats for
+            config: LangChain runnable configuration
+            **kwargs: Additional parameters
 
         Returns:
-            ElfaGetSmartStatsOutput: A structured output containing output of Elfa get mentions API.
+            ElfaGetSmartStatsOutput: Structured response with smart stats
 
         Raises:
-            Exception: If there's an error accessing the Elfa API.
+            ValueError: If API key is not found
+            ToolException: If there's an error with the API request
         """
         context = self.context_from_config(config)
         api_key = self.get_api_key(context)
-        if not api_key:
-            raise ValueError("Elfa API key not found")
-
-        url = f"{base_url}/v1/account/smart-stats"
-        headers = {
-            "accept": "application/json",
-            "x-elfa-api-key": api_key,
-        }
-
-        params = ElfaGetSmartStatsInput(username=username).model_dump(exclude_none=True)
-
-        async with httpx.AsyncClient() as client:
-            try:
-                response = await client.get(
-                    url, headers=headers, timeout=30, params=params
-                )
-                response.raise_for_status()
-                json_dict = response.json()
-
-                res = ElfaGetSmartStatsOutput(**json_dict)
-
-                return res
-            except httpx.RequestError as req_err:
-                raise ToolException(
-                    f"request error from Elfa API: {req_err}"
-                ) from req_err
-            except httpx.HTTPStatusError as http_err:
-                raise ToolException(
-                    f"http error from Elfa API: {http_err}"
-                ) from http_err
-            except Exception as e:
-                raise ToolException(f"error from Elfa API: {e}") from e
+
+        # Prepare parameters according to API spec
+        params = {"username": username}
+
+        # Make API request using shared utility
+        response = await make_elfa_request(
+            endpoint="account/smart-stats", api_key=api_key, params=params
+        )
+
+        # Parse response data into SmartStatsData object
+        stats_data = None
+        if response.data and isinstance(response.data, dict):
+            stats_data = SmartStatsData(**response.data)
+
+        return ElfaGetSmartStatsOutput(
+            success=response.success, data=stats_data, metadata=response.metadata
+        )
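
The smart-stats skill now delegates the HTTP call to the shared `make_elfa_request` helper and maps the v2 response envelope into `SmartStatsData`. A minimal, self-contained sketch of that mapping step; `SmartStatsData` and `ElfaResponse` are re-declared here only so the snippet runs on its own (the real definitions live in the skill's `utils` module), and the values are made up:

```python
from typing import Any, Dict, Optional

from pydantic import BaseModel


class SmartStatsData(BaseModel):
    smartFollowingCount: Optional[int] = None
    averageEngagement: Optional[float] = None
    averageReach: Optional[float] = None
    smartFollowerCount: Optional[int] = None
    followerCount: Optional[int] = None


class ElfaResponse(BaseModel):
    success: bool
    data: Any = None
    metadata: Optional[Dict[str, Any]] = None


# A response shaped like the Elfa v2 envelope (illustrative values only).
response = ElfaResponse(
    success=True,
    data={"smartFollowingCount": 120, "averageEngagement": 4.2, "followerCount": 9800},
    metadata={},
)

# Same guard the refactored skill applies: only parse when data is a dict.
stats = SmartStatsData(**response.data) if isinstance(response.data, dict) else None
print(stats)
```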
@@ -1,126 +1,122 @@
-from typing import Type
+"""Trending tokens skill for Elfa AI API."""
+
+from typing import Any, Dict, List, Optional, Type
 
-import httpx
-from langchain.tools.base import ToolException
 from langchain_core.runnables import RunnableConfig
 from pydantic import BaseModel, Field
 
-from .base import ElfaBaseTool, base_url
+from .base import ElfaBaseTool
+from .utils import make_elfa_request
 
 
 class ElfaGetTrendingTokensInput(BaseModel):
-    timeWindow: str | None = Field(
-        "24h", description="Time window for trending analysis"
+    """Input parameters for trending tokens."""
+
+    timeWindow: Optional[str] = Field(
+        "7d",
+        description="Time window for trending analysis (e.g., '30m', '1h', '4h', '24h', '7d', '30d')",
     )
-    minMentions: int | None = Field(
+    page: Optional[int] = Field(1, description="Page number for pagination")
+    pageSize: Optional[int] = Field(50, description="Number of items per page")
+    minMentions: Optional[int] = Field(
        5, description="Minimum number of mentions required"
    )
 
 
-class Trends(BaseModel):
-    change_percent: int | None = Field(description="change percentage of token trend")
-    previous_count: int | None = Field(description="previous count")
-    current_count: int | None = Field(description="current count")
-    token: str | None = Field(description="token")
+class TrendingToken(BaseModel):
+    """Individual trending token data."""
 
-
-class TrendsData(BaseModel):
-    data: list[Trends] | None = Field(None, description="trending tokens")
+    token: Optional[str] = Field(None, description="Token symbol")
+    current_count: Optional[int] = Field(None, description="Current mention count")
+    previous_count: Optional[int] = Field(None, description="Previous mention count")
+    change_percent: Optional[float] = Field(None, description="Change percentage")
 
 
 class ElfaGetTrendingTokensOutput(BaseModel):
+    """Output structure for trending tokens response."""
+
     success: bool
-    data: TrendsData | None = Field(None, description="The result")
+    data: Optional[List[TrendingToken]] = Field(
+        None, description="List of trending tokens"
+    )
+    metadata: Optional[Dict[str, Any]] = Field(None, description="Response metadata")
 
 
 class ElfaGetTrendingTokens(ElfaBaseTool):
     """
-    This tool ranks the most discussed tokens based on smart mentions count for a given period, with updates every 5 minutes via the Elfa API. Smart mentions provide a more sophisticated measure of discussion volume than simple keyword counts.
-
-    **Use Cases:**
-
-    * Identify trending tokens: Quickly see which tokens are gaining traction in online discussions.
-    * Gauge market sentiment: Track changes in smart mention counts to understand shifts in market opinion.
-    * Research potential investments: Use the ranking as a starting point for further due diligence.
+    Get trending tokens based on smart mentions count.
 
-    **Example Usage:**
+    This tool ranks the most discussed tokens based on smart mentions count for a given period,
+    with updates every 5 minutes via the Elfa API. Smart mentions provide a more sophisticated
+    measure of discussion volume than simple keyword counts.
 
-    To use this tool, you would typically specify a time window (e.g., the last hour, the last 24 hours). The tool will then return a ranked list of tokens, along with their corresponding smart mention counts.
-
-    Attributes:
-        name (str): Name of the tool, specifically "elfa_get_trending_tokens".
-        description (str): Comprehensive description of the tool's purpose and functionality.
-        args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters.
+    Use Cases:
+    - Identify trending tokens: Quickly see which tokens are gaining traction in online discussions
+    - Gauge market sentiment: Track changes in smart mention counts to understand shifts in market opinion
+    - Research potential investments: Use the ranking as a starting point for further due diligence
     """
 
     name: str = "elfa_get_trending_tokens"
-    description: str = """This tool ranks the most discussed tokens based on smart mentions count for a given period, with updates every 5 minutes via the Elfa API. Smart mentions provide a more sophisticated measure of discussion volume than simple keyword counts.
-
-    **Use Cases:**
-
-    * Identify trending tokens: Quickly see which tokens are gaining traction in online discussions.
-    * Gauge market sentiment: Track changes in smart mention counts to understand shifts in market opinion.
-    * Research potential investments: Use the ranking as a starting point for further due diligence.
-
-    **Example Usage:**
-
-    To use this tool, you would typically specify a time window (e.g., the last hour, the last 24 hours). The tool will then return a ranked list of tokens, along with their corresponding smart mention counts."""
+    description: str = """Get trending tokens ranked by smart mentions count for a given time period.
+    Updated every 5 minutes. Smart mentions provide sophisticated discussion volume measurement beyond simple keyword counts.
+
+    Use this to identify tokens gaining traction, gauge market sentiment, and research potential investments."""
    args_schema: Type[BaseModel] = ElfaGetTrendingTokensInput
 
     async def _arun(
        self,
-        timeWindow: str = "24h",
+        timeWindow: str = "7d",
+        page: int = 1,
+        pageSize: int = 50,
        minMentions: int = 5,
        config: RunnableConfig = None,
        **kwargs,
    ) -> ElfaGetTrendingTokensOutput:
-        """Run the tool to ranks the most discussed tokens by smart mentions count for a given period, updated every 5 minutes via the Elfa API.
+        """
+        Execute the trending tokens request.
 
         Args:
-            timeWindow: Time window for trending tokens (e.g., '1h', '24h', '7d').
-            minMentions: Minimum number of mentions for a token.
-            config: The configuration for the runnable, containing agent context.
-            **kwargs: Additional parameters.
+            timeWindow: Time window for analysis (default: 7d)
+            page: Page number for pagination (default: 1)
+            pageSize: Items per page (default: 50)
+            minMentions: Minimum mentions required (default: 5)
+            config: LangChain runnable configuration
+            **kwargs: Additional parameters
 
         Returns:
-            ElfaGetMentionsOutput: A structured output containing output of Elfa get mentions API.
+            ElfaGetTrendingTokensOutput: Structured response with trending tokens
 
         Raises:
-            Exception: If there's an error accessing the Elfa API.
+            ValueError: If API key is not found
+            ToolException: If there's an error with the API request
         """
         context = self.context_from_config(config)
         api_key = self.get_api_key(context)
-        if not api_key:
-            raise ValueError("Elfa API key not found")
 
-        url = f"{base_url}/v1/trending-tokens"
-        headers = {
-            "accept": "application/json",
-            "x-elfa-api-key": api_key,
+        # Prepare parameters according to API spec
+        params = {
+            "timeWindow": timeWindow,
+            "page": page,
+            "pageSize": pageSize,
+            "minMentions": minMentions,
        }
 
-        params = ElfaGetTrendingTokensInput(
-            timeWindow=timeWindow, page=1, pageSize=50, minMentions=minMentions
-        ).model_dump(exclude_none=True)
-
-        async with httpx.AsyncClient() as client:
-            try:
-                response = await client.get(
-                    url, headers=headers, timeout=30, params=params
-                )
-                response.raise_for_status()
-                json_dict = response.json()
-
-                res = ElfaGetTrendingTokensOutput(**json_dict)
-
-                return res
-            except httpx.RequestError as req_err:
-                raise ToolException(
-                    f"request error from Elfa API: {req_err}"
-                ) from req_err
-            except httpx.HTTPStatusError as http_err:
-                raise ToolException(
-                    f"http error from Elfa API: {http_err}"
-                ) from http_err
-            except Exception as e:
-                raise ToolException(f"error from Elfa API: {e}") from e
+        # Make API request using shared utility
+        response = await make_elfa_request(
+            endpoint="aggregations/trending-tokens", api_key=api_key, params=params
+        )
+
+        # Parse response data into TrendingToken objects
+        trending_tokens = []
+        if response.data:
+            if isinstance(response.data, list):
+                trending_tokens = [TrendingToken(**item) for item in response.data]
+            elif isinstance(response.data, dict) and "data" in response.data:
+                # Handle nested data structure if present
+                trending_tokens = [
+                    TrendingToken(**item) for item in response.data["data"]
+                ]
+
+        return ElfaGetTrendingTokensOutput(
+            success=response.success, data=trending_tokens, metadata=response.metadata
+        )
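
The trending-tokens skill now accepts either a bare list or a nested `{"data": [...]}` payload when building `TrendingToken` objects. A self-contained sketch of that branch with made-up values; `TrendingToken` is re-declared here only so the snippet runs on its own:

```python
from typing import Optional

from pydantic import BaseModel


class TrendingToken(BaseModel):
    token: Optional[str] = None
    current_count: Optional[int] = None
    previous_count: Optional[int] = None
    change_percent: Optional[float] = None


def parse_trending(data) -> list[TrendingToken]:
    """Mirror of the parsing branch in ElfaGetTrendingTokens._arun."""
    if isinstance(data, list):
        return [TrendingToken(**item) for item in data]
    if isinstance(data, dict) and "data" in data:
        return [TrendingToken(**item) for item in data["data"]]
    return []


flat = [{"token": "ETH", "current_count": 42, "previous_count": 30, "change_percent": 40.0}]
nested = {"data": flat}
# Both payload shapes produce the same parsed tokens.
assert parse_trending(flat) == parse_trending(nested)
```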
@@ -0,0 +1,129 @@
+"""Utility functions for Elfa skills."""
+
+from typing import Any, Dict, Optional
+
+import httpx
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from .base import base_url
+
+
+class ElfaResponse(BaseModel):
+    """Standard Elfa API v2 response format."""
+
+    success: bool
+    data: Any = None
+    metadata: Optional[Dict[str, Any]] = None
+
+
+async def make_elfa_request(
+    endpoint: str,
+    api_key: str,
+    params: Optional[Dict[str, Any]] = None,
+    timeout: int = 30,
+) -> ElfaResponse:
+    """
+    Make a standardized request to the Elfa API.
+
+    Args:
+        endpoint: API endpoint path (e.g., "aggregations/trending-tokens")
+        api_key: Elfa API key
+        params: Query parameters
+        timeout: Request timeout in seconds
+
+    Returns:
+        ElfaResponse: Standardized response object
+
+    Raises:
+        ToolException: If there's an error with the API request
+    """
+    if not api_key:
+        raise ValueError("Elfa API key not found")
+
+    url = f"{base_url}/{endpoint}"
+    headers = {
+        "accept": "application/json",
+        "x-elfa-api-key": api_key,
+    }
+
+    # Clean up params - remove None values
+    if params:
+        params = {k: v for k, v in params.items() if v is not None}
+
+    async with httpx.AsyncClient() as client:
+        try:
+            response = await client.get(
+                url, headers=headers, timeout=timeout, params=params
+            )
+            response.raise_for_status()
+            json_dict = response.json()
+
+            # Handle v2 response format
+            if isinstance(json_dict, dict) and "success" in json_dict:
+                return ElfaResponse(
+                    success=json_dict["success"],
+                    data=json_dict.get("data"),
+                    metadata=json_dict.get("metadata", {}),
+                )
+            else:
+                # Fallback for unexpected format
+                return ElfaResponse(success=True, data=json_dict, metadata={})
+
+        except httpx.RequestError as req_err:
+            raise ToolException(f"Request error from Elfa API: {req_err}") from req_err
+        except httpx.HTTPStatusError as http_err:
+            raise ToolException(f"HTTP error from Elfa API: {http_err}") from http_err
+        except Exception as e:
+            raise ToolException(f"Error from Elfa API: {e}") from e
+
+
+# Common Pydantic models for v2 API responses
+class RepostBreakdown(BaseModel):
+    """Repost breakdown data."""
+
+    smart: Optional[int] = None
+    ct: Optional[int] = None
+
+
+class Account(BaseModel):
+    """Account information."""
+
+    username: Optional[str] = None
+    isVerified: Optional[bool] = None
+
+
+class MentionData(BaseModel):
+    """Base mention data structure used across multiple endpoints."""
+
+    tweetId: Optional[str] = Field(None, description="Tweet ID")
+    link: Optional[str] = Field(None, description="Link to the tweet")
+    likeCount: Optional[int] = Field(None, description="Number of likes")
+    repostCount: Optional[int] = Field(None, description="Number of reposts")
+    viewCount: Optional[int] = Field(None, description="Number of views")
+    quoteCount: Optional[int] = Field(None, description="Number of quotes")
+    replyCount: Optional[int] = Field(None, description="Number of replies")
+    bookmarkCount: Optional[int] = Field(None, description="Number of bookmarks")
+    mentionedAt: Optional[str] = Field(None, description="When mentioned")
+    type: Optional[str] = Field(None, description="Post type")
+    account: Optional[Account] = Field(None, description="Account information")
+    repostBreakdown: Optional[RepostBreakdown] = Field(
+        None, description="Repost breakdown"
+    )
+
+
+class SmartStatsData(BaseModel):
+    """Smart stats data structure."""
+
+    smartFollowingCount: Optional[int] = Field(
+        None, description="Smart following count"
+    )
+    averageEngagement: Optional[float] = Field(None, description="Average engagement")
+    averageReach: Optional[float] = Field(None, description="Average reach")
+    smartFollowerCount: Optional[int] = Field(None, description="Smart follower count")
+    followerCount: Optional[int] = Field(None, description="Total follower count")
+
+
+def clean_params(params: Dict[str, Any]) -> Dict[str, Any]:
+    """Remove None values from parameters dict."""
+    return {k: v for k, v in params.items() if v is not None}
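
The new `utils` module standardizes two behaviors for all Elfa skills: None-valued query parameters are dropped before the request, and the v2 `{"success", "data", "metadata"}` envelope is normalized into `ElfaResponse` (with a fallback for non-envelope payloads). A self-contained sketch of those two behaviors; `ElfaResponse` is re-declared so the snippet runs without the package, and the sample payloads are made up:

```python
from typing import Any, Dict, Optional

from pydantic import BaseModel


class ElfaResponse(BaseModel):
    success: bool
    data: Any = None
    metadata: Optional[Dict[str, Any]] = None


def clean_params(params: Dict[str, Any]) -> Dict[str, Any]:
    """Same behavior as utils.clean_params: drop None values."""
    return {k: v for k, v in params.items() if v is not None}


def normalize(json_dict: Any) -> ElfaResponse:
    """Mirror of the envelope handling inside make_elfa_request."""
    if isinstance(json_dict, dict) and "success" in json_dict:
        return ElfaResponse(
            success=json_dict["success"],
            data=json_dict.get("data"),
            metadata=json_dict.get("metadata", {}),
        )
    # Fallback: wrap unexpected payloads as a successful response.
    return ElfaResponse(success=True, data=json_dict, metadata={})


assert clean_params({"timeWindow": "7d", "minMentions": None}) == {"timeWindow": "7d"}
print(normalize({"success": True, "data": [], "metadata": {"total": 0}}))
print(normalize([1, 2, 3]))  # non-envelope payloads fall back to success=True
```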
@@ -58,7 +58,7 @@ class TwitterGetMentions(TwitterBaseTool):
         await self.check_rate_limit(
             context.agent_id,
             max_requests=1,
-            interval=59,  # TODO: tmp to 59, back to 240 later
+            interval=15,
         )
 
         # get since id from store
@@ -53,7 +53,7 @@ class TwitterGetTimeline(TwitterBaseTool):
         # Check rate limit only when not using OAuth
         if not twitter.use_key:
             await self.check_rate_limit(
-                context.agent_id, max_requests=3, interval=60 * 24
+                context.agent_id, max_requests=1, interval=15
             )
 
         # get since id from store
@@ -51,7 +51,7 @@ class TwitterGetUserByUsername(TwitterBaseTool):
         # Check rate limit only when not using OAuth
         if not twitter.use_key:
             await self.check_rate_limit(
-                context.agent_id, max_requests=3, interval=60 * 24
+                context.agent_id, max_requests=5, interval=60 * 24
             )
 
         user_data = await client.get_user(
@@ -67,7 +67,7 @@ class TwitterGetUserTweets(TwitterBaseTool):
         # Check rate limit only when not using OAuth
         if not twitter.use_key:
             await self.check_rate_limit(
-                context.agent_id, max_requests=3, interval=60 * 24
+                context.agent_id, max_requests=1, interval=15
             )
 
         # get since id from store
@@ -50,7 +50,7 @@ class TwitterSearchTweets(TwitterBaseTool):
         # Check rate limit only when not using OAuth
         if not twitter.use_key:
             await self.check_rate_limit(
-                context.agent_id, max_requests=3, interval=60 * 24
+                context.agent_id, max_requests=1, interval=15
             )
 
         # Get since_id from store to avoid duplicate results
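
These hunks only change the `(max_requests, interval)` arguments passed to `check_rate_limit`; the limiter itself is not part of this diff. A hypothetical, in-memory sliding-window sketch of what those two parameters imply, assuming the interval is expressed in minutes (the old `60 * 24` value suggests a one-day window, but the unit and the real storage backend are assumptions, not taken from this release):

```python
import time
from collections import defaultdict, deque

# Per-agent timestamps of recent calls (illustrative only; the real limiter
# in intentkit is not shown in this diff).
_calls: dict[str, deque] = defaultdict(deque)


def allow(agent_id: str, max_requests: int, interval_minutes: int) -> bool:
    """Return True if agent_id may make another request within the window."""
    now = time.monotonic()
    window = interval_minutes * 60
    calls = _calls[agent_id]
    # Drop timestamps that have fallen out of the window.
    while calls and now - calls[0] > window:
        calls.popleft()
    if len(calls) >= max_requests:
        return False
    calls.append(now)
    return True


# New timeline/search limit in this release: 1 request per 15-minute window.
assert allow("agent-1", max_requests=1, interval_minutes=15) is True
assert allow("agent-1", max_requests=1, interval_minutes=15) is False
```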
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: intentkit
-Version: 0.6.7.dev7
+Version: 0.6.7.dev9
 Summary: Intent-based AI Agent Platform - Core Package
 Project-URL: Homepage, https://github.com/crestal-network/intentkit
 Project-URL: Repository, https://github.com/crestal-network/intentkit