intentkit-0.6.0.dev11-py3-none-any.whl → intentkit-0.6.0.dev12-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Note: this release of intentkit has been flagged as potentially problematic.

intentkit/skills/firecrawl/scrape.py (new file)
@@ -0,0 +1,324 @@
+ import logging
+ from typing import List, Optional, Type
+
+ import httpx
+ from langchain_core.documents import Document
+ from langchain_core.runnables import RunnableConfig
+ from pydantic import BaseModel, Field
+
+ from intentkit.skills.firecrawl.base import FirecrawlBaseTool
+
+ logger = logging.getLogger(__name__)
+
+
+ class FirecrawlScrapeInput(BaseModel):
+     """Input for Firecrawl scrape tool."""
+
+     url: str = Field(
+         description="The URL to scrape. Must be a valid HTTP or HTTPS URL."
+     )
+     formats: List[str] = Field(
+         description="Output formats to include in the response. Options: 'markdown', 'html', 'rawHtml', 'screenshot', 'links', 'json'",
+         default=["markdown"],
+     )
+     only_main_content: bool = Field(
+         description="Whether to extract only the main content (excluding headers, footers, navigation, etc.)",
+         default=True,
+     )
+     include_tags: Optional[List[str]] = Field(
+         description="HTML tags, classes, or IDs to include in the response (e.g., ['h1', 'p', '.main-content'])",
+         default=None,
+     )
+     exclude_tags: Optional[List[str]] = Field(
+         description="HTML tags, classes, or IDs to exclude from the response (e.g., ['#ad', '#footer'])",
+         default=None,
+     )
+     wait_for: int = Field(
+         description="Wait time in milliseconds before scraping (use only as last resort)",
+         default=0,
+         ge=0,
+     )
+     timeout: int = Field(
+         description="Maximum timeout in milliseconds for the scraping operation",
+         default=30000,
+         ge=1000,
+         le=120000,
+     )
+     index_content: bool = Field(
+         description="Whether to index the scraped content for later querying (default: True)",
+         default=True,
+     )
+     chunk_size: int = Field(
+         description="Size of text chunks for indexing (default: 1000)",
+         default=1000,
+         ge=100,
+         le=4000,
+     )
+     chunk_overlap: int = Field(
+         description="Overlap between chunks (default: 200)",
+         default=200,
+         ge=0,
+         le=1000,
+     )
+
+
+ class FirecrawlScrape(FirecrawlBaseTool):
+     """Tool for scraping web pages using Firecrawl.
+
+     This tool uses Firecrawl's API to scrape web pages and convert them into clean,
+     LLM-ready formats like markdown, HTML, or structured JSON data.
+
+     Attributes:
+         name: The name of the tool.
+         description: A description of what the tool does.
+         args_schema: The schema for the tool's input arguments.
+     """
+
+     name: str = "firecrawl_scrape"
+     description: str = (
+         "Scrape a single web page and extract its content in various formats (markdown, HTML, JSON, etc.). "
+         "This tool can handle JavaScript-rendered content, PDFs, and dynamic websites. "
+         "Optionally indexes the content for later querying using the firecrawl_query_indexed_content tool. "
+         "Use this when you need to extract clean, structured content from a specific URL."
+     )
+     args_schema: Type[BaseModel] = FirecrawlScrapeInput
+
+     async def _arun(
+         self,
+         url: str,
+         formats: List[str] = None,
+         only_main_content: bool = True,
+         include_tags: Optional[List[str]] = None,
+         exclude_tags: Optional[List[str]] = None,
+         wait_for: int = 0,
+         timeout: int = 30000,
+         index_content: bool = True,
+         chunk_size: int = 1000,
+         chunk_overlap: int = 200,
+         config: RunnableConfig = None,
+         **kwargs,
+     ) -> str:
+         """Implementation of the Firecrawl scrape tool.
+
+         Args:
+             url: The URL to scrape.
+             formats: Output formats to include in the response.
+             only_main_content: Whether to extract only main content.
+             include_tags: HTML tags/classes/IDs to include.
+             exclude_tags: HTML tags/classes/IDs to exclude.
+             wait_for: Wait time in milliseconds before scraping.
+             timeout: Maximum timeout in milliseconds.
+             index_content: Whether to index the content for later querying.
+             chunk_size: Size of text chunks for indexing.
+             chunk_overlap: Overlap between chunks.
+             config: The configuration for the tool call.
+
+         Returns:
+             str: Formatted scraped content based on the requested formats.
+         """
+         context = self.context_from_config(config)
+         logger.debug(f"firecrawl_scrape: Running scrape with context {context}")
+
+         if context.config.get("api_key_provider") == "agent_owner":
+             if context.config.get("rate_limit_number") and context.config.get(
+                 "rate_limit_minutes"
+             ):
+                 await self.user_rate_limit_by_category(
+                     context.user_id,
+                     context.config["rate_limit_number"],
+                     context.config["rate_limit_minutes"],
+                 )
+
+         # Get the API key from the agent's configuration
+         api_key = self.get_api_key(context)
+         if not api_key:
+             return "Error: No Firecrawl API key provided in the configuration."
+
+         # Validate and set defaults
+         if formats is None:
+             formats = ["markdown"]
+
+         # Validate formats
+         valid_formats = ["markdown", "html", "rawHtml", "screenshot", "links", "json"]
+         formats = [f for f in formats if f in valid_formats]
+         if not formats:
+             formats = ["markdown"]
+
+         # Prepare the request payload
+         payload = {
+             "url": url,
+             "formats": formats,
+             "onlyMainContent": only_main_content,
+             "timeout": timeout,
+         }
+
+         if include_tags:
+             payload["includeTags"] = include_tags
+         if exclude_tags:
+             payload["excludeTags"] = exclude_tags
+         if wait_for > 0:
+             payload["waitFor"] = wait_for
+
+         # Call Firecrawl scrape API
+         try:
+             async with httpx.AsyncClient(timeout=timeout / 1000 + 10) as client:
+                 response = await client.post(
+                     "https://api.firecrawl.dev/v1/scrape",
+                     json=payload,
+                     headers={
+                         "Authorization": f"Bearer {api_key}",
+                         "Content-Type": "application/json",
+                     },
+                 )
+
+                 if response.status_code != 200:
+                     logger.error(
+                         f"firecrawl_scrape: Error from Firecrawl API: {response.status_code} - {response.text}"
+                     )
+                     return (
+                         f"Error scraping URL: {response.status_code} - {response.text}"
+                     )
+
+                 data = response.json()
+
+                 if not data.get("success"):
+                     error_msg = data.get("error", "Unknown error occurred")
+                     return f"Error scraping URL: {error_msg}"
+
+                 result_data = data.get("data", {})
+
+                 # Format the results based on requested formats
+                 formatted_result = f"Successfully scraped: {url}\n\n"
+
+                 if "markdown" in formats and result_data.get("markdown"):
+                     formatted_result += "## Markdown Content\n"
+                     formatted_result += result_data["markdown"][:2000]  # Limit length
+                     if len(result_data["markdown"]) > 2000:
+                         formatted_result += "... (content truncated)"
+                     formatted_result += "\n\n"
+
+                 if "html" in formats and result_data.get("html"):
+                     formatted_result += "## HTML Content\n"
+                     formatted_result += f"HTML content available ({len(result_data['html'])} characters)\n\n"
+
+                 if "links" in formats and result_data.get("links"):
+                     formatted_result += "## Extracted Links\n"
+                     links = result_data["links"][:10]  # Limit to first 10 links
+                     for link in links:
+                         formatted_result += f"- {link}\n"
+                     if len(result_data["links"]) > 10:
+                         formatted_result += (
+                             f"... and {len(result_data['links']) - 10} more links\n"
+                         )
+                     formatted_result += "\n"
+
+                 if "json" in formats and result_data.get("json"):
+                     formatted_result += "## Structured Data (JSON)\n"
+                     formatted_result += str(result_data["json"])[:1000]  # Limit length
+                     if len(str(result_data["json"])) > 1000:
+                         formatted_result += "... (data truncated)"
+                     formatted_result += "\n\n"
+
+                 if "screenshot" in formats and result_data.get("screenshot"):
+                     formatted_result += "## Screenshot\n"
+                     formatted_result += (
+                         f"Screenshot available at: {result_data['screenshot']}\n\n"
+                     )
+
+                 # Add metadata information
+                 metadata = result_data.get("metadata", {})
+                 if metadata:
+                     formatted_result += "## Page Metadata\n"
+                     if metadata.get("title"):
+                         formatted_result += f"Title: {metadata['title']}\n"
+                     if metadata.get("description"):
+                         formatted_result += f"Description: {metadata['description']}\n"
+                     if metadata.get("language"):
+                         formatted_result += f"Language: {metadata['language']}\n"
+                     formatted_result += "\n"
+
+                 # Index content if requested
+                 if index_content and result_data.get("markdown"):
+                     try:
+                         # Import indexing utilities from firecrawl utils
+                         from intentkit.skills.firecrawl.utils import (
+                             FirecrawlMetadataManager,
+                             index_documents,
+                         )
+
+                         # Create document from scraped content
+                         document = Document(
+                             page_content=result_data["markdown"],
+                             metadata={
+                                 "source": url,
+                                 "title": metadata.get("title", ""),
+                                 "description": metadata.get("description", ""),
+                                 "language": metadata.get("language", ""),
+                                 "source_type": "firecrawl_scrape",
+                                 "indexed_at": str(
+                                     context.agent.id
+                                     if context and context.agent
+                                     else "unknown"
+                                 ),
+                             },
+                         )
+
+                         # Get agent ID for indexing
+                         agent_id = (
+                             context.agent.id if context and context.agent else None
+                         )
+                         if agent_id:
+                             # Index the document
+                             total_chunks, was_merged = await index_documents(
+                                 [document],
+                                 agent_id,
+                                 self.skill_store,
+                                 chunk_size,
+                                 chunk_overlap,
+                             )
+
+                             # Update metadata
+                             metadata_manager = FirecrawlMetadataManager(
+                                 self.skill_store
+                             )
+                             new_metadata = metadata_manager.create_url_metadata(
+                                 [url], [document], "firecrawl_scrape"
+                             )
+                             await metadata_manager.update_metadata(
+                                 agent_id, new_metadata
+                             )
+
+                             formatted_result += "\n## Content Indexing\n"
+                             formatted_result += (
+                                 "Successfully indexed content into vector store:\n"
+                             )
+                             formatted_result += f"- Chunks created: {total_chunks}\n"
+                             formatted_result += f"- Chunk size: {chunk_size}\n"
+                             formatted_result += f"- Chunk overlap: {chunk_overlap}\n"
+                             formatted_result += f"- Content merged with existing: {'Yes' if was_merged else 'No'}\n"
+                             formatted_result += "Use the 'firecrawl_query_indexed_content' skill to search this content.\n"
+
+                             logger.info(
+                                 f"firecrawl_scrape: Successfully indexed {url} with {total_chunks} chunks"
+                             )
+                         else:
+                             formatted_result += "\n## Content Indexing\n"
+                             formatted_result += "Warning: Could not index content - agent ID not available.\n"
+
+                     except Exception as index_error:
+                         logger.error(
+                             f"firecrawl_scrape: Error indexing content: {index_error}"
+                         )
+                         formatted_result += "\n## Content Indexing\n"
+                         formatted_result += f"Warning: Failed to index content for later querying: {str(index_error)}\n"
+
+                 return formatted_result.strip()
+
+         except httpx.TimeoutException:
+             logger.error(f"firecrawl_scrape: Timeout scraping URL: {url}")
+             return (
+                 f"Timeout error: The request to scrape {url} took too long to complete."
+             )
+         except Exception as e:
+             logger.error(f"firecrawl_scrape: Error scraping URL: {e}", exc_info=True)
+             return f"An error occurred while scraping the URL: {str(e)}"
intentkit/skills/firecrawl/utils.py (new file)
@@ -0,0 +1,287 @@
+ """Utilities for Firecrawl skill content indexing and querying."""
+
+ import logging
+ import pickle
+ import re
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import faiss
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import FAISS
+ from langchain_core.documents import Document
+ from langchain_openai import OpenAIEmbeddings
+
+ from intentkit.abstracts.skill import SkillStoreABC
+
+ logger = logging.getLogger(__name__)
+
+
+ class FirecrawlDocumentProcessor:
+     """Handles document processing and sanitization for Firecrawl content."""
+
+     @staticmethod
+     def sanitize_for_database(text: str) -> str:
+         """Sanitize text content to prevent database storage errors."""
+         if not text:
+             return ""
+
+         # Remove null bytes and other problematic characters
+         text = text.replace("\x00", "")
+         text = re.sub(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]", "", text)
+
+         # Normalize whitespace
+         text = re.sub(r"\s+", " ", text)
+         text = text.strip()
+
+         return text
+
+     @staticmethod
+     def split_documents(
+         documents: List[Document], chunk_size: int = 1000, chunk_overlap: int = 200
+     ) -> List[Document]:
+         """Split documents into smaller chunks for better indexing."""
+         text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=chunk_size,
+             chunk_overlap=chunk_overlap,
+             length_function=len,
+         )
+
+         split_docs = []
+         for doc in documents:
+             # Sanitize content before splitting
+             sanitized_content = FirecrawlDocumentProcessor.sanitize_for_database(
+                 doc.page_content
+             )
+             doc.page_content = sanitized_content
+
+             # Split the document
+             chunks = text_splitter.split_documents([doc])
+             split_docs.extend(chunks)
+
+         return split_docs
+
+
+ class FirecrawlVectorStoreManager:
+     """Manages vector store operations for Firecrawl content."""
+
+     def __init__(self, skill_store: SkillStoreABC):
+         self.skill_store = skill_store
+
+     def create_embeddings(self) -> OpenAIEmbeddings:
+         """Create OpenAI embeddings instance."""
+         openai_api_key = self.skill_store.get_system_config("openai_api_key")
+         if not openai_api_key:
+             raise ValueError("OpenAI API key not found in system configuration")
+
+         return OpenAIEmbeddings(
+             openai_api_key=openai_api_key, model="text-embedding-3-small"
+         )
+
+     def encode_vector_store(self, vector_store: FAISS) -> Dict[str, bytes]:
+         """Encode FAISS vector store to bytes for storage."""
+         try:
+             # Serialize the index
+             index_bytes = faiss.serialize_index(vector_store.index)
+
+             # Serialize the docstore and index_to_docstore_id
+             docstore_bytes = pickle.dumps(vector_store.docstore)
+             index_to_docstore_bytes = pickle.dumps(vector_store.index_to_docstore_id)
+
+             return {
+                 "index": index_bytes,
+                 "docstore": docstore_bytes,
+                 "index_to_docstore": index_to_docstore_bytes,
+             }
+         except Exception as e:
+             logger.error(f"Error encoding vector store: {e}")
+             raise
+
+     def decode_vector_store(
+         self, faiss_data: Dict[str, bytes], embeddings: OpenAIEmbeddings
+     ) -> FAISS:
+         """Decode FAISS vector store from stored bytes."""
+         try:
+             # Deserialize the index
+             index = faiss.deserialize_index(faiss_data["index"])
+
+             # Deserialize the docstore and index_to_docstore_id
+             docstore = pickle.loads(faiss_data["docstore"])
+             index_to_docstore_id = pickle.loads(faiss_data["index_to_docstore"])
+
+             # Create FAISS vector store
+             vector_store = FAISS(
+                 embedding_function=embeddings,
+                 index=index,
+                 docstore=docstore,
+                 index_to_docstore_id=index_to_docstore_id,
+             )
+
+             return vector_store
+         except Exception as e:
+             logger.error(f"Error decoding vector store: {e}")
+             raise
+
+     async def load_vector_store(self, agent_id: str) -> Optional[FAISS]:
+         """Load existing vector store for an agent."""
+         try:
+             vector_store_key = f"firecrawl_vector_store_{agent_id}"
+             stored_data = await self.skill_store.get_agent_skill_data(
+                 agent_id, "firecrawl", vector_store_key
+             )
+
+             if not stored_data or "faiss_files" not in stored_data:
+                 return None
+
+             embeddings = self.create_embeddings()
+             return self.decode_vector_store(stored_data["faiss_files"], embeddings)
+
+         except Exception as e:
+             logger.error(f"Error loading vector store for agent {agent_id}: {e}")
+             return None
+
+     async def save_vector_store(self, agent_id: str, vector_store: FAISS) -> None:
+         """Save vector store for an agent."""
+         try:
+             vector_store_key = f"firecrawl_vector_store_{agent_id}"
+             encoded_data = self.encode_vector_store(vector_store)
+
+             await self.skill_store.save_agent_skill_data(
+                 agent_id, "firecrawl", vector_store_key, {"faiss_files": encoded_data}
+             )
+
+         except Exception as e:
+             logger.error(f"Error saving vector store for agent {agent_id}: {e}")
+             raise
+
+
+ class FirecrawlMetadataManager:
+     """Manages metadata for Firecrawl indexed content."""
+
+     def __init__(self, skill_store: SkillStoreABC):
+         self.skill_store = skill_store
+
+     def create_url_metadata(
+         self, urls: List[str], documents: List[Document], source_type: str
+     ) -> Dict[str, Any]:
+         """Create metadata for indexed URLs."""
+         return {
+             "urls": urls,
+             "document_count": len(documents),
+             "source_type": source_type,
+             "indexed_at": str(len(urls)),  # Simple counter
+         }
+
+     async def update_metadata(
+         self, agent_id: str, new_metadata: Dict[str, Any]
+     ) -> None:
+         """Update metadata for an agent."""
+         try:
+             metadata_key = f"firecrawl_metadata_{agent_id}"
+             await self.skill_store.save_agent_skill_data(
+                 agent_id, "firecrawl", metadata_key, new_metadata
+             )
+         except Exception as e:
+             logger.error(f"Error updating metadata for agent {agent_id}: {e}")
+             raise
+
+
+ async def index_documents(
+     documents: List[Document],
+     agent_id: str,
+     skill_store: SkillStoreABC,
+     chunk_size: int = 1000,
+     chunk_overlap: int = 200,
+ ) -> Tuple[int, bool]:
+     """
+     Index documents into the Firecrawl vector store.
+
+     Args:
+         documents: List of documents to index
+         agent_id: Agent ID for storage
+         skill_store: Skill store for persistence
+         chunk_size: Size of text chunks
+         chunk_overlap: Overlap between chunks
+
+     Returns:
+         Tuple of (total_chunks, was_merged_with_existing)
+     """
+     try:
+         # Initialize managers
+         vs_manager = FirecrawlVectorStoreManager(skill_store)
+
+         # Split documents into chunks
+         split_docs = FirecrawlDocumentProcessor.split_documents(
+             documents, chunk_size, chunk_overlap
+         )
+
+         if not split_docs:
+             logger.warning("No documents to index after splitting")
+             return 0, False
+
+         # Create embeddings
+         embeddings = vs_manager.create_embeddings()
+
+         # Try to load existing vector store
+         existing_vector_store = await vs_manager.load_vector_store(agent_id)
+
+         if existing_vector_store:
+             # Add to existing vector store
+             existing_vector_store.add_documents(split_docs)
+             vector_store = existing_vector_store
+             was_merged = True
+         else:
+             # Create new vector store
+             vector_store = FAISS.from_documents(split_docs, embeddings)
+             was_merged = False
+
+         # Save the vector store
+         await vs_manager.save_vector_store(agent_id, vector_store)
+
+         logger.info(
+             f"Successfully indexed {len(split_docs)} chunks for agent {agent_id}"
+         )
+         return len(split_docs), was_merged
+
+     except Exception as e:
+         logger.error(f"Error indexing documents for agent {agent_id}: {e}")
+         raise
+
+
+ async def query_indexed_content(
+     query: str,
+     agent_id: str,
+     skill_store: SkillStoreABC,
+     max_results: int = 4,
+ ) -> List[Document]:
+     """
+     Query the Firecrawl indexed content.
+
+     Args:
+         query: Search query
+         agent_id: Agent ID
+         skill_store: Skill store for persistence
+         max_results: Maximum number of results to return
+
+     Returns:
+         List of relevant documents
+     """
+     try:
+         # Initialize vector store manager
+         vs_manager = FirecrawlVectorStoreManager(skill_store)
+
+         # Load vector store
+         vector_store = await vs_manager.load_vector_store(agent_id)
+
+         if not vector_store:
+             logger.warning(f"No vector store found for agent {agent_id}")
+             return []
+
+         # Perform similarity search
+         docs = vector_store.similarity_search(query, k=max_results)
+
+         logger.info(f"Found {len(docs)} documents for query: {query}")
+         return docs
+
+     except Exception as e:
+         logger.error(f"Error querying indexed content for agent {agent_id}: {e}")
+         raise
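As a rough, illustrative sketch of how these helpers fit together (not content from the wheel): index_documents chunks and embeds documents and persists the FAISS store through the skill store, and query_indexed_content reloads that store for similarity search. The skill_store argument below is assumed to be any SkillStoreABC implementation, an OpenAI API key is assumed to be present in the system configuration for embeddings, and "agent-123" is a hypothetical agent ID.

from langchain_core.documents import Document

from intentkit.skills.firecrawl.utils import index_documents, query_indexed_content

async def index_then_query(skill_store):
    # A single scraped page represented as a LangChain Document.
    doc = Document(
        page_content="Firecrawl turns web pages into clean, LLM-ready markdown.",
        metadata={"source": "https://example.com", "source_type": "firecrawl_scrape"},
    )
    # Chunk, embed, and persist; returns the chunk count and whether an existing store was reused.
    total_chunks, was_merged = await index_documents(
        [doc], "agent-123", skill_store, chunk_size=1000, chunk_overlap=200
    )
    # Reload the persisted store and run a similarity search over the indexed chunks.
    hits = await query_indexed_content(
        "what does firecrawl do", "agent-123", skill_store, max_results=4
    )
    return total_chunks, was_merged, hits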
intentkit-0.6.0.dev12.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: intentkit
- Version: 0.6.0.dev11
+ Version: 0.6.0.dev12
  Summary: Intent-based AI Agent Platform - Core Package
  Project-URL: Homepage, https://github.com/crestal-network/intentkit
  Project-URL: Repository, https://github.com/crestal-network/intentkit
intentkit-0.6.0.dev12.dist-info/RECORD
@@ -1,4 +1,4 @@
- intentkit/__init__.py,sha256=ocMohkspIwMPfLvDLTEEblprk6kxb8zfmxqcpsvAJXY,385
+ intentkit/__init__.py,sha256=9Y0g_rS1HE5FwTHW4j_FiaRZlhJBAplh56BkufeEKjU,385
  intentkit/abstracts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  intentkit/abstracts/agent.py,sha256=108gb5W8Q1Sy4G55F2_ZFv2-_CnY76qrBtpIr0Oxxqk,1489
  intentkit/abstracts/api.py,sha256=ZUc24vaQvQVbbjznx7bV0lbbQxdQPfEV8ZxM2R6wZWo,166
@@ -11,7 +11,7 @@ intentkit/clients/__init__.py,sha256=sQ_6_bRC2MPWLPH-skQ3qsEe8ce-dUGL7i8VJOautHg
  intentkit/clients/cdp.py,sha256=9iK5t-zSITIWS-bdv1ymF-86FeblAh9QI9GjNP2Mg5Y,3783
  intentkit/clients/twitter.py,sha256=JAc-skIhZZjAFcwzLSTiOPOonteGjrl_JwXoA8IVBmI,16934
  intentkit/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- intentkit/config/config.py,sha256=q601dwkcEmD0DC24Rbmmp66KHRzp5m3wbBwN7DshaQc,7389
+ intentkit/config/config.py,sha256=6RreVvQH1xuHVOnIJ3AcaRYzdMw1RLo0vYYtvPKvTds,7453
  intentkit/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  intentkit/core/agent.py,sha256=PTHsFV_EbsfPUcTI0ErkebWurjxVykP9ISbNUsqV44Q,7764
  intentkit/core/api.py,sha256=3GIMJpwduLUSbVPNW6mQVxZncYHH3OlLwdiqFtYtEAE,1208
@@ -23,7 +23,7 @@ intentkit/core/prompt.py,sha256=9jxRYUUqSdBj8bdmCUAa-5yTbiQFVInOJsDqbAuUcfo,3512
  intentkit/core/skill.py,sha256=rE37qwDmpnHnIG0MwKxuonVO_lOq47gy-tvXMOz9VZs,3498
  intentkit/models/agent.py,sha256=5amc4rjPHmZp7a4Z6chomDiFHaxgyr0Md5mKeI90jWQ,57487
  intentkit/models/agent_data.py,sha256=h__b3658ZOclV1Pwpp3UCCu0Nt49CKYfc2JZKG1dKeE,26929
- intentkit/models/agent_schema.json,sha256=kacuFT9gPuj16vhZg8Yg1kBJ-3La4uCzBa__uTnQmfY,21368
+ intentkit/models/agent_schema.json,sha256=SCJZOKh1sYaFLUqyW85C7LE8TPWlPA5yk1vVTgjVOYY,21499
  intentkit/models/app_setting.py,sha256=WgW-9t0EbiVemRLrVaC6evdfRU5QFSDK0elsnUU5nIo,5008
  intentkit/models/base.py,sha256=o-zRjVrak-f5Jokdvj8BjLm8gcC3yYiYMCTLegwT2lA,185
  intentkit/models/chat.py,sha256=H4fKBgrseOaFIp83sYYiuyYpYufQAvnoca6V4TVbibE,18013
@@ -188,6 +188,15 @@ intentkit/skills/enso/abi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
  intentkit/skills/enso/abi/approval.py,sha256=IsyQLFxzAttocrtCB2PhbgprA7Vqujzpxvg0hJbeJ00,9867
  intentkit/skills/enso/abi/erc20.py,sha256=IScqZhHpMt_eFfYtMXw0-w5jptkAK0xsqqUDjbWdb2s,439
  intentkit/skills/enso/abi/route.py,sha256=ng9U2RSyS5R3d-b0m5ELa4rFpaUDO9HcgSoX9P_wWZo,4746
+ intentkit/skills/firecrawl/README.md,sha256=yuJHQMRODVFLAOcVGlmH1192n1xLdPbY0awamJwI2hc,5903
+ intentkit/skills/firecrawl/__init__.py,sha256=wKWTAPti-FGTvcLIl8vsmCYaLnUGSYxG4yF-v9viRPQ,2821
+ intentkit/skills/firecrawl/base.py,sha256=5OlRsM551MKt1nHLzg0u3UR16UIBqsEqNZlim03ZdiA,953
+ intentkit/skills/firecrawl/crawl.py,sha256=p99fCLLyDPQo31uZCJafx7NtM7QsACk6Tgp42vxHg4I,18768
+ intentkit/skills/firecrawl/firecrawl.png,sha256=6GoGlIMYuIDo-TqMlZbD4QYkmxvQ7krqAa5MANumJqk,5065
+ intentkit/skills/firecrawl/query.py,sha256=K6KXEBJvH0f7MurqAw8v8G-3jgYBmEtXoYIlauQa7rY,4542
+ intentkit/skills/firecrawl/schema.json,sha256=9Tv9vchZuwrp9TNQDOPEdptsEYl9B8fIGI4Jd_jmSag,3814
+ intentkit/skills/firecrawl/scrape.py,sha256=JCCjYU3YDZYay-v9_5hoid0ZLKOmsqzfQNs2Ol3AS6w,13980
+ intentkit/skills/firecrawl/utils.py,sha256=PPanodhRKf75z_ho2zgJCQTTlRQnyGgEHBV31XKehS0,9495
  intentkit/skills/github/README.md,sha256=SzYGJ9qSPaZl68iD8AQJGKTMLv0keQZesnSK-VhrAfs,1802
  intentkit/skills/github/__init__.py,sha256=Vva9jMtACSM_cZXy5JY0h6Q1ejR1jm-Xu3Q6PwyB72o,1471
  intentkit/skills/github/base.py,sha256=Mzlv_cqe307asPEtBEZ6dGDDWa5vPi48dJWu2SwW6lk,590
@@ -362,7 +371,7 @@ intentkit/utils/random.py,sha256=DymMxu9g0kuQLgJUqalvgksnIeLdS-v0aRk5nQU0mLI,452
  intentkit/utils/s3.py,sha256=9trQNkKQ5VgxWsewVsV8Y0q_pXzGRvsCYP8xauyUYkg,8549
  intentkit/utils/slack_alert.py,sha256=s7UpRgyzLW7Pbmt8cKzTJgMA9bm4EP-1rQ5KXayHu6E,2264
  intentkit/utils/tx.py,sha256=2yLLGuhvfBEY5n_GJ8wmIWLCzn0FsYKv5kRNzw_sLUI,1454
- intentkit-0.6.0.dev11.dist-info/METADATA,sha256=Xl2J9CcvUf2ur5LdCGYdk2zRNZVFye_70l3ZgN6uJlo,7286
- intentkit-0.6.0.dev11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- intentkit-0.6.0.dev11.dist-info/licenses/LICENSE,sha256=Bln6DhK-LtcO4aXy-PBcdZv2f24MlJFm_qn222biJtE,1071
- intentkit-0.6.0.dev11.dist-info/RECORD,,
+ intentkit-0.6.0.dev12.dist-info/METADATA,sha256=kZRuRKtmmXN1F9HAQPEi8lci-AjlDhV_kASrHnCsUrY,7286
+ intentkit-0.6.0.dev12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ intentkit-0.6.0.dev12.dist-info/licenses/LICENSE,sha256=Bln6DhK-LtcO4aXy-PBcdZv2f24MlJFm_qn222biJtE,1071
+ intentkit-0.6.0.dev12.dist-info/RECORD,,