intentkit 0.8.16.dev1__py3-none-any.whl → 0.8.17.dev2__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Potentially problematic release.
This version of intentkit might be problematic.
- intentkit/__init__.py +1 -1
- intentkit/abstracts/agent.py +4 -5
- intentkit/abstracts/engine.py +5 -5
- intentkit/abstracts/graph.py +6 -5
- intentkit/abstracts/skill.py +5 -5
- intentkit/abstracts/twitter.py +4 -5
- intentkit/clients/cdp.py +19 -77
- intentkit/clients/twitter.py +26 -34
- intentkit/clients/web3.py +1 -3
- intentkit/config/config.py +4 -0
- intentkit/core/agent.py +15 -15
- intentkit/core/asset.py +1 -2
- intentkit/core/client.py +1 -1
- intentkit/core/credit.py +19 -20
- intentkit/core/engine.py +2 -4
- intentkit/core/node.py +2 -1
- intentkit/core/prompt.py +3 -4
- intentkit/core/scheduler.py +1 -1
- intentkit/core/statistics.py +6 -7
- intentkit/models/agent.py +125 -92
- intentkit/models/agent_data.py +62 -36
- intentkit/models/app_setting.py +6 -6
- intentkit/models/chat.py +27 -24
- intentkit/models/conversation.py +8 -8
- intentkit/models/credit.py +62 -64
- intentkit/models/db.py +8 -7
- intentkit/models/db_mig.py +2 -2
- intentkit/models/llm.py +12 -14
- intentkit/models/redis.py +2 -3
- intentkit/models/skill.py +25 -27
- intentkit/models/skills.csv +29 -28
- intentkit/models/user.py +21 -22
- intentkit/skills/acolyt/ask.py +3 -4
- intentkit/skills/acolyt/base.py +1 -3
- intentkit/skills/aixbt/base.py +1 -3
- intentkit/skills/aixbt/projects.py +13 -13
- intentkit/skills/allora/base.py +1 -3
- intentkit/skills/allora/price.py +2 -3
- intentkit/skills/base.py +15 -22
- intentkit/skills/basename/__init__.py +3 -5
- intentkit/skills/carv/__init__.py +7 -8
- intentkit/skills/carv/base.py +6 -6
- intentkit/skills/carv/fetch_news.py +3 -3
- intentkit/skills/carv/onchain_query.py +4 -4
- intentkit/skills/carv/token_info_and_price.py +5 -5
- intentkit/skills/casino/base.py +1 -3
- intentkit/skills/casino/deck_draw.py +1 -2
- intentkit/skills/casino/deck_shuffle.py +1 -2
- intentkit/skills/casino/dice_roll.py +1 -2
- intentkit/skills/cdp/__init__.py +3 -5
- intentkit/skills/cdp/base.py +1 -3
- intentkit/skills/chainlist/base.py +1 -3
- intentkit/skills/chainlist/chain_lookup.py +18 -18
- intentkit/skills/common/base.py +1 -3
- intentkit/skills/common/current_time.py +1 -2
- intentkit/skills/cookiefun/base.py +1 -2
- intentkit/skills/cookiefun/get_account_details.py +7 -7
- intentkit/skills/cookiefun/get_account_feed.py +19 -19
- intentkit/skills/cookiefun/get_account_smart_followers.py +7 -7
- intentkit/skills/cookiefun/get_sectors.py +3 -3
- intentkit/skills/cookiefun/search_accounts.py +9 -9
- intentkit/skills/cryptocompare/api.py +2 -3
- intentkit/skills/cryptocompare/base.py +6 -6
- intentkit/skills/cryptocompare/fetch_news.py +3 -4
- intentkit/skills/cryptocompare/fetch_price.py +5 -6
- intentkit/skills/cryptocompare/fetch_top_exchanges.py +3 -4
- intentkit/skills/cryptocompare/fetch_top_market_cap.py +3 -4
- intentkit/skills/cryptocompare/fetch_top_volume.py +3 -4
- intentkit/skills/cryptocompare/fetch_trading_signals.py +4 -5
- intentkit/skills/cryptopanic/__init__.py +4 -4
- intentkit/skills/cryptopanic/base.py +1 -3
- intentkit/skills/cryptopanic/fetch_crypto_news.py +3 -5
- intentkit/skills/cryptopanic/fetch_crypto_sentiment.py +3 -3
- intentkit/skills/dapplooker/base.py +1 -3
- intentkit/skills/dapplooker/dapplooker_token_data.py +7 -7
- intentkit/skills/defillama/api.py +6 -9
- intentkit/skills/defillama/base.py +5 -6
- intentkit/skills/defillama/coins/fetch_batch_historical_prices.py +6 -8
- intentkit/skills/defillama/coins/fetch_block.py +4 -6
- intentkit/skills/defillama/coins/fetch_current_prices.py +6 -8
- intentkit/skills/defillama/coins/fetch_first_price.py +5 -7
- intentkit/skills/defillama/coins/fetch_historical_prices.py +7 -9
- intentkit/skills/defillama/coins/fetch_price_chart.py +7 -9
- intentkit/skills/defillama/coins/fetch_price_percentage.py +5 -7
- intentkit/skills/defillama/config/chains.py +1 -3
- intentkit/skills/defillama/fees/fetch_fees_overview.py +22 -24
- intentkit/skills/defillama/stablecoins/fetch_stablecoin_chains.py +14 -16
- intentkit/skills/defillama/stablecoins/fetch_stablecoin_charts.py +6 -8
- intentkit/skills/defillama/stablecoins/fetch_stablecoin_prices.py +3 -5
- intentkit/skills/defillama/stablecoins/fetch_stablecoins.py +5 -7
- intentkit/skills/defillama/tests/api_integration.test.py +1 -1
- intentkit/skills/defillama/tvl/fetch_chain_historical_tvl.py +2 -4
- intentkit/skills/defillama/tvl/fetch_chains.py +7 -9
- intentkit/skills/defillama/tvl/fetch_historical_tvl.py +2 -4
- intentkit/skills/defillama/tvl/fetch_protocol.py +30 -36
- intentkit/skills/defillama/tvl/fetch_protocol_current_tvl.py +1 -3
- intentkit/skills/defillama/tvl/fetch_protocols.py +35 -43
- intentkit/skills/defillama/volumes/fetch_dex_overview.py +40 -46
- intentkit/skills/defillama/volumes/fetch_dex_summary.py +33 -35
- intentkit/skills/defillama/volumes/fetch_options_overview.py +22 -26
- intentkit/skills/defillama/yields/fetch_pool_chart.py +8 -10
- intentkit/skills/defillama/yields/fetch_pools.py +24 -28
- intentkit/skills/dexscreener/__init__.py +2 -2
- intentkit/skills/dexscreener/base.py +3 -3
- intentkit/skills/dexscreener/get_pair_info.py +2 -2
- intentkit/skills/dexscreener/get_token_pairs.py +2 -2
- intentkit/skills/dexscreener/get_tokens_info.py +5 -5
- intentkit/skills/dexscreener/model/search_token_response.py +80 -82
- intentkit/skills/dexscreener/search_token.py +182 -182
- intentkit/skills/dexscreener/utils.py +15 -14
- intentkit/skills/dune_analytics/__init__.py +4 -4
- intentkit/skills/dune_analytics/base.py +1 -3
- intentkit/skills/dune_analytics/fetch_kol_buys.py +4 -4
- intentkit/skills/dune_analytics/fetch_nation_metrics.py +5 -5
- intentkit/skills/elfa/base.py +1 -3
- intentkit/skills/elfa/mention.py +19 -21
- intentkit/skills/elfa/stats.py +4 -4
- intentkit/skills/elfa/tokens.py +12 -12
- intentkit/skills/elfa/utils.py +25 -27
- intentkit/skills/enso/__init__.py +2 -2
- intentkit/skills/enso/base.py +5 -8
- intentkit/skills/enso/best_yield.py +4 -6
- intentkit/skills/enso/networks.py +1 -2
- intentkit/skills/enso/prices.py +1 -3
- intentkit/skills/enso/route.py +1 -3
- intentkit/skills/enso/tokens.py +1 -3
- intentkit/skills/enso/wallet.py +5 -5
- intentkit/skills/erc20/__init__.py +4 -6
- intentkit/skills/erc721/__init__.py +4 -6
- intentkit/skills/firecrawl/base.py +1 -3
- intentkit/skills/firecrawl/clear.py +1 -2
- intentkit/skills/firecrawl/crawl.py +9 -10
- intentkit/skills/firecrawl/query.py +1 -2
- intentkit/skills/firecrawl/scrape.py +7 -8
- intentkit/skills/firecrawl/utils.py +13 -13
- intentkit/skills/github/base.py +1 -3
- intentkit/skills/github/github_search.py +1 -2
- intentkit/skills/heurist/base.py +1 -3
- intentkit/skills/heurist/image_generation_animagine_xl.py +7 -8
- intentkit/skills/heurist/image_generation_arthemy_comics.py +7 -8
- intentkit/skills/heurist/image_generation_arthemy_real.py +7 -8
- intentkit/skills/heurist/image_generation_braindance.py +7 -8
- intentkit/skills/heurist/image_generation_cyber_realistic_xl.py +7 -8
- intentkit/skills/heurist/image_generation_flux_1_dev.py +7 -8
- intentkit/skills/heurist/image_generation_sdxl.py +7 -8
- intentkit/skills/http/base.py +1 -3
- intentkit/skills/http/get.py +7 -7
- intentkit/skills/http/post.py +9 -9
- intentkit/skills/http/put.py +9 -9
- intentkit/skills/lifi/__init__.py +4 -4
- intentkit/skills/lifi/base.py +1 -3
- intentkit/skills/lifi/token_execute.py +13 -13
- intentkit/skills/lifi/token_quote.py +6 -6
- intentkit/skills/lifi/utils.py +16 -16
- intentkit/skills/moralis/__init__.py +3 -3
- intentkit/skills/moralis/api.py +6 -7
- intentkit/skills/moralis/base.py +2 -4
- intentkit/skills/moralis/fetch_chain_portfolio.py +10 -11
- intentkit/skills/moralis/fetch_nft_portfolio.py +22 -22
- intentkit/skills/moralis/fetch_solana_portfolio.py +11 -12
- intentkit/skills/moralis/fetch_wallet_portfolio.py +8 -9
- intentkit/skills/morpho/__init__.py +4 -6
- intentkit/skills/nation/__init__.py +2 -2
- intentkit/skills/nation/base.py +1 -3
- intentkit/skills/nation/nft_check.py +3 -4
- intentkit/skills/onchain.py +2 -6
- intentkit/skills/openai/base.py +1 -3
- intentkit/skills/openai/dalle_image_generation.py +1 -3
- intentkit/skills/openai/gpt_image_generation.py +2 -3
- intentkit/skills/openai/gpt_image_to_image.py +2 -3
- intentkit/skills/openai/image_to_text.py +1 -2
- intentkit/skills/portfolio/base.py +6 -6
- intentkit/skills/portfolio/token_balances.py +21 -21
- intentkit/skills/portfolio/wallet_approvals.py +7 -7
- intentkit/skills/portfolio/wallet_defi_positions.py +3 -3
- intentkit/skills/portfolio/wallet_history.py +21 -21
- intentkit/skills/portfolio/wallet_net_worth.py +13 -13
- intentkit/skills/portfolio/wallet_nfts.py +19 -19
- intentkit/skills/portfolio/wallet_profitability.py +7 -7
- intentkit/skills/portfolio/wallet_profitability_summary.py +5 -5
- intentkit/skills/portfolio/wallet_stats.py +3 -3
- intentkit/skills/portfolio/wallet_swaps.py +19 -19
- intentkit/skills/pyth/__init__.py +3 -5
- intentkit/skills/slack/base.py +2 -4
- intentkit/skills/slack/get_channel.py +8 -8
- intentkit/skills/slack/get_message.py +9 -9
- intentkit/skills/slack/schedule_message.py +5 -5
- intentkit/skills/slack/send_message.py +3 -5
- intentkit/skills/supabase/base.py +1 -3
- intentkit/skills/supabase/delete_data.py +4 -4
- intentkit/skills/supabase/fetch_data.py +12 -12
- intentkit/skills/supabase/insert_data.py +4 -4
- intentkit/skills/supabase/invoke_function.py +6 -6
- intentkit/skills/supabase/update_data.py +6 -6
- intentkit/skills/supabase/upsert_data.py +4 -4
- intentkit/skills/superfluid/__init__.py +4 -6
- intentkit/skills/system/add_autonomous_task.py +8 -10
- intentkit/skills/system/edit_autonomous_task.py +12 -14
- intentkit/skills/system/list_autonomous_tasks.py +1 -3
- intentkit/skills/tavily/base.py +1 -3
- intentkit/skills/tavily/tavily_extract.py +1 -2
- intentkit/skills/tavily/tavily_search.py +1 -3
- intentkit/skills/token/base.py +5 -5
- intentkit/skills/token/erc20_transfers.py +19 -19
- intentkit/skills/token/token_analytics.py +3 -3
- intentkit/skills/token/token_price.py +13 -13
- intentkit/skills/token/token_search.py +9 -9
- intentkit/skills/twitter/base.py +3 -4
- intentkit/skills/twitter/follow_user.py +1 -2
- intentkit/skills/twitter/get_mentions.py +3 -4
- intentkit/skills/twitter/get_timeline.py +1 -2
- intentkit/skills/twitter/get_user_by_username.py +1 -2
- intentkit/skills/twitter/get_user_tweets.py +2 -3
- intentkit/skills/twitter/like_tweet.py +1 -2
- intentkit/skills/twitter/post_tweet.py +3 -4
- intentkit/skills/twitter/reply_tweet.py +3 -4
- intentkit/skills/twitter/retweet.py +1 -2
- intentkit/skills/twitter/search_tweets.py +1 -2
- intentkit/skills/unrealspeech/base.py +1 -3
- intentkit/skills/unrealspeech/text_to_speech.py +8 -8
- intentkit/skills/venice_audio/__init__.py +8 -9
- intentkit/skills/venice_audio/base.py +3 -4
- intentkit/skills/venice_audio/input.py +41 -41
- intentkit/skills/venice_audio/venice_audio.py +6 -6
- intentkit/skills/venice_image/__init__.py +5 -5
- intentkit/skills/venice_image/api.py +138 -138
- intentkit/skills/venice_image/base.py +3 -3
- intentkit/skills/venice_image/config.py +33 -35
- intentkit/skills/venice_image/image_enhance/image_enhance.py +2 -3
- intentkit/skills/venice_image/image_enhance/image_enhance_base.py +21 -23
- intentkit/skills/venice_image/image_enhance/image_enhance_input.py +38 -40
- intentkit/skills/venice_image/image_generation/image_generation_base.py +9 -9
- intentkit/skills/venice_image/image_generation/image_generation_fluently_xl.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_flux_dev.py +27 -27
- intentkit/skills/venice_image/image_generation/image_generation_flux_dev_uncensored.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_input.py +158 -158
- intentkit/skills/venice_image/image_generation/image_generation_lustify_sdxl.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_pony_realism.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_stable_diffusion_3_5.py +28 -28
- intentkit/skills/venice_image/image_generation/image_generation_venice_sd35.py +28 -28
- intentkit/skills/venice_image/image_upscale/image_upscale.py +3 -3
- intentkit/skills/venice_image/image_upscale/image_upscale_base.py +21 -23
- intentkit/skills/venice_image/image_upscale/image_upscale_input.py +22 -22
- intentkit/skills/venice_image/image_vision/image_vision.py +2 -2
- intentkit/skills/venice_image/image_vision/image_vision_base.py +17 -17
- intentkit/skills/venice_image/image_vision/image_vision_input.py +9 -9
- intentkit/skills/venice_image/utils.py +77 -78
- intentkit/skills/web_scraper/base.py +1 -3
- intentkit/skills/web_scraper/document_indexer.py +1 -2
- intentkit/skills/web_scraper/scrape_and_index.py +4 -5
- intentkit/skills/web_scraper/utils.py +25 -26
- intentkit/skills/web_scraper/website_indexer.py +10 -11
- intentkit/skills/weth/__init__.py +4 -6
- intentkit/skills/wow/__init__.py +4 -6
- intentkit/skills/x402/__init__.py +11 -3
- intentkit/skills/x402/ask_agent.py +12 -78
- intentkit/skills/x402/base.py +90 -0
- intentkit/skills/x402/http_request.py +117 -0
- intentkit/skills/x402/schema.json +15 -10
- intentkit/skills/xmtp/base.py +3 -3
- intentkit/skills/xmtp/price.py +2 -2
- intentkit/skills/xmtp/swap.py +2 -4
- intentkit/skills/xmtp/transfer.py +4 -6
- intentkit/utils/error.py +2 -2
- intentkit/utils/logging.py +2 -4
- intentkit/utils/s3.py +8 -9
- intentkit/utils/schema.py +5 -5
- intentkit/utils/slack_alert.py +7 -8
- {intentkit-0.8.16.dev1.dist-info → intentkit-0.8.17.dev2.dist-info}/METADATA +3 -4
- intentkit-0.8.17.dev2.dist-info/RECORD +464 -0
- intentkit/models/generator.py +0 -347
- intentkit-0.8.16.dev1.dist-info/RECORD +0 -464
- {intentkit-0.8.16.dev1.dist-info → intentkit-0.8.17.dev2.dist-info}/WHEEL +0 -0
- {intentkit-0.8.16.dev1.dist-info → intentkit-0.8.17.dev2.dist-info}/licenses/LICENSE +0 -0
intentkit/skills/venice_image/image_upscale/image_upscale.py

@@ -1,5 +1,5 @@
 import logging
-from typing import Literal
+from typing import Literal

 from pydantic import HttpUrl

@@ -32,7 +32,7 @@ class ImageUpscale(VeniceImageUpscaleBaseTool):
         self,
         image_url: HttpUrl,
         scale: Literal[2, 4],
-        replication:
+        replication: float | None = 0.35,
         **kwargs,
     ) -> dict:
         """
@@ -41,7 +41,7 @@ class ImageUpscale(VeniceImageUpscaleBaseTool):
         Args:
             image_url (HttpUrl): The public URL of the image to upscale.
             scale (Literal[2, 4]): The scale factor for upscaling (2x or 4x).
-            replication (
+            replication (float | None): The replication factor for the upscale process, defaults to 0.35.
             config (RunnableConfig, optional): Configuration for the runnable, if any.
             **kwargs: Additional keyword arguments.

intentkit/skills/venice_image/image_upscale/image_upscale_base.py

@@ -1,23 +1,21 @@
-        description="A description of what the image upscaling tool does."
-    )
+from pydantic import BaseModel, Field
+
+# Import the generic base and shared input
+from intentkit.skills.venice_image.base import VeniceImageBaseTool
+from intentkit.skills.venice_image.image_upscale.image_upscale_input import (
+    VeniceImageUpscaleInput,
+)
+
+
+class VeniceImageUpscaleBaseTool(VeniceImageBaseTool):
+    """
+    Base class for Venice AI *Image Upscaling* tools.
+    Inherits from VeniceAIBaseTool and handles specifics of the
+    /image/upscale endpoint
+    """
+
+    args_schema: type[BaseModel] = VeniceImageUpscaleInput
+    name: str = Field(description="The unique name of the image upscaling tool.")
+    description: str = Field(
+        description="A description of what the image upscaling tool does."
+    )
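The hunk above replaces the upscale base class wholesale. As a rough sketch of how a concrete tool plugs into it (the class name, tool name, and description below are invented for illustration; the package's real ImageUpscale tool appears in the image_upscale.py hunks above):

```python
# Illustrative only: field wiring implied by VeniceImageUpscaleBaseTool above.
from pydantic import BaseModel

from intentkit.skills.venice_image.image_upscale.image_upscale_base import (
    VeniceImageUpscaleBaseTool,
)
from intentkit.skills.venice_image.image_upscale.image_upscale_input import (
    VeniceImageUpscaleInput,
)


class ExampleImageUpscale(VeniceImageUpscaleBaseTool):
    name: str = "example_image_upscale"  # invented value
    description: str = "Upscale a public image by 2x or 4x."  # invented value
    args_schema: type[BaseModel] = VeniceImageUpscaleInput  # restates the inherited default
```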
intentkit/skills/venice_image/image_upscale/image_upscale_input.py

@@ -1,22 +1,22 @@
-from typing import Literal
-
-from pydantic import BaseModel, Field, HttpUrl
-
-
-class VeniceImageUpscaleInput(BaseModel):
-    """Input for the Image Upscale tool."""
-
-    image_url: HttpUrl = Field(
-        description="The URL of the image to upscale. Must be a publicly accessible URL.",
-    )
-    replication:
-        default=0.35,
-        description=(
-            'How strongly lines and noise in the base image are preserved. Higher values are noisier but less plastic/AI "generated"/hallucinated. Must be between 0.1 and 1.'
-            "Required range: 0.1 <= x <= 1"
-        ),
-    )
-    scale: Literal[2, 4] = Field(
-        default=2,
-        description="The factor by which to upscale the image (either 2 or 4). Defaults to 2.",
-    )
+from typing import Literal
+
+from pydantic import BaseModel, Field, HttpUrl
+
+
+class VeniceImageUpscaleInput(BaseModel):
+    """Input for the Image Upscale tool."""
+
+    image_url: HttpUrl = Field(
+        description="The URL of the image to upscale. Must be a publicly accessible URL.",
+    )
+    replication: float | None = Field(
+        default=0.35,
+        description=(
+            'How strongly lines and noise in the base image are preserved. Higher values are noisier but less plastic/AI "generated"/hallucinated. Must be between 0.1 and 1.'
+            "Required range: 0.1 <= x <= 1"
+        ),
+    )
+    scale: Literal[2, 4] = Field(
+        default=2,
+        description="The factor by which to upscale the image (either 2 or 4). Defaults to 2.",
+    )
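Since the removed `replication` line is truncated in the diff view, the practical effect is easiest to read off the new model: `replication` is now an optional float defaulting to 0.35, and the 0.1–1 range is documented but not enforced by a validator. A minimal usage sketch (the URL is made up):

```python
from intentkit.skills.venice_image.image_upscale.image_upscale_input import (
    VeniceImageUpscaleInput,
)

# Omitted fields fall back to their declared defaults.
payload = VeniceImageUpscaleInput(image_url="https://example.com/photo.jpg")
print(payload.replication)  # 0.35
print(payload.scale)  # 2
```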
intentkit/skills/venice_image/image_vision/image_vision.py

@@ -1,5 +1,5 @@
 import logging
-from typing import Any
+from typing import Any

 from pydantic import BaseModel, HttpUrl

@@ -27,7 +27,7 @@ class ImageVision(VeniceImageVisionBaseTool):
         "Provide the public URL of the image to describe.\n"
         "Returns a descriptive text of the image."
     )
-    args_schema:
+    args_schema: type[BaseModel] = VeniceImageVision
    # No model_id needed for the generic vision endpoint currently

    async def _arun(
intentkit/skills/venice_image/image_vision/image_vision_base.py

@@ -1,17 +1,17 @@
-from pydantic import Field
-
-# Import the generic base and shared input
-from intentkit.skills.venice_image.base import VeniceImageBaseTool
-
-
-class VeniceImageVisionBaseTool(VeniceImageBaseTool):
-    """
-    Base class for Venice AI *Image Vision* tools.
-    Inherits from VeniceAIBaseTool and handles specifics of the
-    /chat/completions endpoint.
-    """
-
-    name: str = Field(description="The unique name of the image vision tool.")
-    description: str = Field(
-        description="A description of what the image vision tool does."
-    )
+from pydantic import Field
+
+# Import the generic base and shared input
+from intentkit.skills.venice_image.base import VeniceImageBaseTool
+
+
+class VeniceImageVisionBaseTool(VeniceImageBaseTool):
+    """
+    Base class for Venice AI *Image Vision* tools.
+    Inherits from VeniceAIBaseTool and handles specifics of the
+    /chat/completions endpoint.
+    """
+
+    name: str = Field(description="The unique name of the image vision tool.")
+    description: str = Field(
+        description="A description of what the image vision tool does."
+    )
intentkit/skills/venice_image/image_vision/image_vision_input.py

@@ -1,9 +1,9 @@
-from pydantic import BaseModel, Field, HttpUrl
-
-
-class VeniceImageVision(BaseModel):
-    """Input for the Image Vision tool."""
-
-    image_url: HttpUrl = Field(
-        description="The URL of the image to to be described by the Vision model. Must be a publicly accessible URL.",
-    )
+from pydantic import BaseModel, Field, HttpUrl
+
+
+class VeniceImageVision(BaseModel):
+    """Input for the Image Vision tool."""
+
+    image_url: HttpUrl = Field(
+        description="The URL of the image to to be described by the Vision model. Must be a publicly accessible URL.",
+    )
intentkit/skills/venice_image/utils.py

@@ -1,78 +1,77 @@
-import base64
-import io
-import logging
-    return base64.b64encode(image_bytes).decode("utf-8")
+import base64
+import io
+import logging
+
+import filetype
+import httpx
+from PIL import Image
+from pydantic import HttpUrl
+
+from intentkit.skills.base import ToolException
+
+logger = logging.getLogger(__name__)
+
+
+async def fetch_image_as_bytes(image_url: HttpUrl) -> bytes:
+    """Fetches image bytes from a given URL. Converts unsupported formats to PNG using Pillow.
+
+    Raises:
+        ToolException: If fetching or converting the image fails.
+    """
+    try:
+        async with httpx.AsyncClient(timeout=90) as client:
+            response = await client.get(str(image_url), follow_redirects=True)
+            response.raise_for_status()
+
+            original_bytes = response.content
+
+            # Guess file type from content
+            kind = filetype.guess(original_bytes)
+            detected_ext = kind.extension if kind else None
+            detected_mime = kind.mime if kind else "unknown"
+
+            if not detected_ext or not detected_mime.startswith("image/"):
+                msg = f"URL {image_url} did not return a recognizable image format. Detected: {detected_mime}"
+                logger.error(msg)
+                raise ToolException(msg)
+
+            if detected_ext in ("jpg", "jpeg", "png"):
+                return original_bytes
+
+            # Convert unsupported image to PNG
+            try:
+                img = Image.open(io.BytesIO(original_bytes)).convert("RGBA")
+                with io.BytesIO() as output:
+                    img.save(output, format="PNG")
+                    logger.info(
+                        f"Converted unsupported image type '{detected_ext}' to PNG."
+                    )
+                    return output.getvalue()
+            except Exception as e:
+                msg = f"Failed to convert image ({detected_ext}) to PNG: {e}"
+                logger.error(msg, exc_info=True)
+                raise ToolException(msg) from e
+
+    except httpx.HTTPStatusError as e:
+        msg = f"HTTP error fetching image {image_url}: Status {e.response.status_code}"
+        logger.error(msg)
+        raise ToolException(msg) from e
+    except httpx.RequestError as e:
+        msg = f"Network error fetching image {image_url}: {e}"
+        logger.error(msg)
+        raise ToolException(msg) from e
+    except Exception as e:
+        msg = f"Unexpected error fetching image {image_url}: {e}"
+        logger.error(msg, exc_info=True)
+        raise ToolException(msg) from e
+
+
+async def fetch_image_as_base64(image_url: HttpUrl) -> str | None:
+    """Fetches an image from the URL and returns the image as a Base64-encoded string."""
+    image_bytes = await fetch_image_as_bytes(image_url)
+
+    if image_bytes is None:
+        return None
+
+    # Convert image bytes to a Base64-encoded string
+    return base64.b64encode(image_bytes).decode("utf-8")
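The rewritten helpers download an image over HTTP, pass JPEG/PNG bytes through unchanged, convert anything else to PNG with Pillow, and raise ToolException on fetch or conversion failures. A hedged caller sketch (the script and URL are illustrative, not part of the package):

```python
import asyncio

from intentkit.skills.venice_image.utils import fetch_image_as_base64


async def main() -> None:
    # A WebP image would be converted to PNG before encoding;
    # network or decode errors surface as ToolException.
    encoded = await fetch_image_as_base64("https://example.com/picture.webp")
    if encoded is not None:
        print(f"fetched {len(encoded)} base64 characters")


asyncio.run(main())
```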
intentkit/skills/web_scraper/base.py

@@ -1,5 +1,3 @@
-from typing import Type
-
 from langchain_core.tools.base import ToolException
 from pydantic import BaseModel, Field

@@ -12,7 +10,7 @@ class WebScraperBaseTool(IntentKitSkill):

     name: str = Field(description="The name of the tool")
     description: str = Field(description="A description of what the tool does")
-    args_schema:
+    args_schema: type[BaseModel]

     @property
     def category(self) -> str:
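This hunk, like most of the web_scraper and venice_image changes in this release, is the same mechanical modernization: `typing.Type/List/Dict/Tuple/Optional` give way to PEP 585 builtin generics and PEP 604 unions. A before/after sketch (illustrative signatures; only `get_storage_keys` mirrors a real method from the web_scraper/utils.py hunks further down):

```python
# Before: typing aliases, as removed throughout this release.
from typing import Dict, List, Optional, Tuple


def get_storage_keys_old(agent_id: str) -> Tuple[str, str]: ...
def load_metadata_old(agent_id: str) -> Optional[Dict[str, str]]: ...
def create_chunks_old(documents: List[str]) -> List[str]: ...


# After: builtin generics and `X | None` unions (Python 3.9+/3.10+ syntax).
def get_storage_keys(agent_id: str) -> tuple[str, str]: ...
def load_metadata(agent_id: str) -> dict[str, str] | None: ...
def create_chunks(documents: list[str]) -> list[str]: ...
```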
intentkit/skills/web_scraper/document_indexer.py

@@ -1,5 +1,4 @@
 import logging
-from typing import Type

 from pydantic import BaseModel, Field

@@ -65,7 +64,7 @@ class DocumentIndexer(WebScraperBaseTool):
         "Perfect for adding content from Google Docs, Notion pages, PDFs, or any other document sources. "
         "The indexed content can then be queried using the query_indexed_content tool."
     )
-    args_schema:
+    args_schema: type[BaseModel] = DocumentIndexerInput

     async def _arun(
         self,
intentkit/skills/web_scraper/scrape_and_index.py

@@ -1,5 +1,4 @@
 import logging
-from typing import List, Type

 from pydantic import BaseModel, Field

@@ -19,7 +18,7 @@ logger = logging.getLogger(__name__)
 class ScrapeAndIndexInput(BaseModel):
     """Input for ScrapeAndIndex tool."""

-    urls:
+    urls: list[str] = Field(
         description="List of URLs to scrape and index. Each URL should be a valid web address starting with http:// or https://",
         min_items=1,
         max_items=25,
@@ -67,11 +66,11 @@ class ScrapeAndIndex(WebScraperBaseTool):
         "Use this tool to collect and index web content that you want to reference later.\n"
         "The indexed content can then be queried using the query_indexed_content tool."
     )
-    args_schema:
+    args_schema: type[BaseModel] = ScrapeAndIndexInput

     async def _arun(
         self,
-        urls:
+        urls: list[str],
         chunk_size: int = DEFAULT_CHUNK_SIZE,
         chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
         **kwargs,
@@ -171,7 +170,7 @@ class QueryIndexedContent(WebScraperBaseTool):
         "Use this tool to search through content that was previously scraped and indexed.\n"
         "This tool can help answer questions based on the indexed web content."
     )
-    args_schema:
+    args_schema: type[BaseModel] = QueryIndexInput

     async def _arun(
         self,
intentkit/skills/web_scraper/utils.py

@@ -10,7 +10,6 @@ import base64
 import logging
 import os
 import tempfile
-from typing import Dict, List, Optional, Tuple

 from langchain_community.vectorstores import FAISS
 from langchain_core.documents import Document
@@ -63,7 +62,7 @@ METADATA_KEY_PREFIX = "indexed_urls"
 class VectorStoreManager:
     """Manages vector store operations including creation, saving, loading, and merging."""

-    def __init__(self, embedding_api_key:
+    def __init__(self, embedding_api_key: str | None = None):
         self._embedding_api_key = embedding_api_key

     def _resolve_api_key(self) -> str:
@@ -79,13 +78,13 @@ class VectorStoreManager:
         api_key = self._resolve_api_key()
         return OpenAIEmbeddings(api_key=api_key)

-    def get_storage_keys(self, agent_id: str) ->
+    def get_storage_keys(self, agent_id: str) -> tuple[str, str]:
         """Get storage keys for vector store and metadata."""
         vector_store_key = f"{VECTOR_STORE_KEY_PREFIX}_{agent_id}"
         metadata_key = f"{METADATA_KEY_PREFIX}_{agent_id}"
         return vector_store_key, metadata_key

-    def encode_vector_store(self, vector_store: FAISS) ->
+    def encode_vector_store(self, vector_store: FAISS) -> dict[str, str]:
         """Encode FAISS vector store to base64 for storage."""
         with tempfile.TemporaryDirectory() as temp_dir:
             vector_store.save_local(temp_dir)
@@ -102,7 +101,7 @@ class VectorStoreManager:
         return encoded_files

     def decode_vector_store(
-        self, encoded_files:
+        self, encoded_files: dict[str, str], embeddings: OpenAIEmbeddings
     ) -> FAISS:
         """Decode base64 files back to FAISS vector store."""
         with tempfile.TemporaryDirectory() as temp_dir:
@@ -119,18 +118,18 @@ class VectorStoreManager:
             allow_dangerous_deserialization=True,
         )

-    async def get_existing_vector_store(self, agent_id: str) ->
+    async def get_existing_vector_store(self, agent_id: str) -> dict | None:
         """Get existing vector store data if it exists."""
         vector_store_key, _ = self.get_storage_keys(agent_id)
         return await AgentSkillData.get(agent_id, "web_scraper", vector_store_key)

     async def merge_with_existing(
         self,
-        new_documents:
+        new_documents: list[Document],
         agent_id: str,
         chunk_size: int = DEFAULT_CHUNK_SIZE,
         chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
-    ) ->
+    ) -> tuple[FAISS, bool]:
         """
         Merge new documents with existing vector store or create new one.

@@ -205,7 +204,7 @@ class VectorStoreManager:
             logger.error(f"[{agent_id}] Failed to save vector store: {e}")
             raise

-    async def load_vector_store(self, agent_id: str) ->
+    async def load_vector_store(self, agent_id: str) -> FAISS | None:
         """Load vector store for an agent."""
         stored_data = await self.get_existing_vector_store(agent_id)

@@ -250,10 +249,10 @@ class DocumentProcessor:

     @staticmethod
     def create_chunks(
-        documents:
+        documents: list[Document],
         chunk_size: int = DEFAULT_CHUNK_SIZE,
         chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
-    ) ->
+    ) -> list[Document]:
         """Split documents into chunks."""
         text_splitter = RecursiveCharacterTextSplitter(
             chunk_size=chunk_size,
@@ -292,7 +291,7 @@ class DocumentProcessor:
         title: str,
         source: str,
         tags: str = "",
-        extra_metadata:
+        extra_metadata: dict | None = None,
     ) -> Document:
         """Create a Document with standardized metadata."""
         cleaned_content = DocumentProcessor.clean_text(content)
@@ -324,18 +323,18 @@ class MetadataManager:
     def __init__(self, vector_manager: VectorStoreManager):
         self._vector_manager = vector_manager

-    async def get_existing_metadata(self, agent_id: str) ->
+    async def get_existing_metadata(self, agent_id: str) -> dict:
         """Get existing metadata for an agent."""
         _, metadata_key = self._vector_manager.get_storage_keys(agent_id)
         return await AgentSkillData.get(agent_id, "web_scraper", metadata_key) or {}

     def create_url_metadata(
         self,
-        urls:
-        split_docs:
+        urls: list[str],
+        split_docs: list[Document],
         source_type: str = "web_scraper",
-        extra_fields:
-    ) ->
+        extra_fields: dict | None = None,
+    ) -> dict:
         """Create metadata for a list of URLs."""
         metadata = {}
         current_time = str(asyncio.get_event_loop().time())
@@ -361,9 +360,9 @@ class MetadataManager:
         title: str,
         source: str,
         tags: str,
-        split_docs:
+        split_docs: list[Document],
         document_length: int,
-    ) ->
+    ) -> dict:
         """Create metadata for a document."""
         # Generate unique key
         key = f"document_{title.lower().replace(' ', '_')}"
@@ -382,7 +381,7 @@ class MetadataManager:
             }
         }

-    async def update_metadata(self, agent_id: str, new_metadata:
+    async def update_metadata(self, agent_id: str, new_metadata: dict) -> None:
         """Update metadata for an agent."""
         _, metadata_key = self._vector_manager.get_storage_keys(agent_id)

@@ -408,12 +407,12 @@ class ResponseFormatter:
     @staticmethod
     def format_indexing_response(
         operation_type: str,
-        urls_or_content:
+        urls_or_content: list[str] | str,
         total_chunks: int,
         chunk_size: int,
         chunk_overlap: int,
         was_merged: bool,
-        extra_info:
+        extra_info: dict | None = None,
         current_size_bytes: int = 0,
         size_limit_reached: bool = False,
         total_requested_urls: int = 0,
@@ -481,13 +480,13 @@ class ResponseFormatter:


 async def scrape_and_index_urls(
-    urls:
+    urls: list[str],
     agent_id: str,
     vector_manager: VectorStoreManager,
     chunk_size: int = DEFAULT_CHUNK_SIZE,
     chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
     requests_per_second: int = DEFAULT_REQUESTS_PER_SECOND,
-) ->
+) -> tuple[int, bool, list[str]]:
     """
     Scrape URLs and index their content into vector store with size limits.

@@ -636,12 +635,12 @@ async def scrape_and_index_urls(

 # Convenience function that combines all operations
 async def index_documents(
-    documents:
+    documents: list[Document],
     agent_id: str,
     vector_manager: VectorStoreManager,
     chunk_size: int = DEFAULT_CHUNK_SIZE,
     chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
-) ->
+) -> tuple[int, bool]:
     """
     Complete document indexing workflow.
