intentkit 0.7.5.dev3__py3-none-any.whl → 0.8.34.dev7__py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- intentkit/MANIFEST.in +14 -0
- intentkit/README.md +88 -0
- intentkit/__init__.py +6 -4
- intentkit/abstracts/agent.py +4 -5
- intentkit/abstracts/engine.py +5 -5
- intentkit/abstracts/graph.py +15 -8
- intentkit/abstracts/skill.py +6 -144
- intentkit/abstracts/twitter.py +4 -5
- intentkit/clients/__init__.py +9 -2
- intentkit/clients/cdp.py +129 -153
- intentkit/{utils → clients}/s3.py +109 -34
- intentkit/clients/twitter.py +83 -62
- intentkit/clients/web3.py +4 -7
- intentkit/config/config.py +123 -90
- intentkit/core/account_checking.py +802 -0
- intentkit/core/agent.py +313 -498
- intentkit/core/asset.py +267 -0
- intentkit/core/chat.py +5 -3
- intentkit/core/client.py +1 -1
- intentkit/core/credit.py +49 -41
- intentkit/core/draft.py +201 -0
- intentkit/core/draft_chat.py +118 -0
- intentkit/core/engine.py +378 -287
- intentkit/core/manager/__init__.py +25 -0
- intentkit/core/manager/engine.py +220 -0
- intentkit/core/manager/service.py +172 -0
- intentkit/core/manager/skills.py +178 -0
- intentkit/core/middleware.py +231 -0
- intentkit/core/prompt.py +74 -114
- intentkit/core/scheduler.py +143 -0
- intentkit/core/statistics.py +168 -0
- intentkit/models/agent.py +931 -518
- intentkit/models/agent_data.py +165 -106
- intentkit/models/agent_schema.json +38 -251
- intentkit/models/app_setting.py +15 -13
- intentkit/models/chat.py +86 -140
- intentkit/models/credit.py +182 -162
- intentkit/models/db.py +42 -23
- intentkit/models/db_mig.py +120 -3
- intentkit/models/draft.py +222 -0
- intentkit/models/llm.csv +31 -0
- intentkit/models/llm.py +262 -370
- intentkit/models/redis.py +6 -4
- intentkit/models/skill.py +222 -101
- intentkit/models/skills.csv +173 -0
- intentkit/models/team.py +189 -0
- intentkit/models/user.py +103 -31
- intentkit/skills/acolyt/__init__.py +2 -9
- intentkit/skills/acolyt/ask.py +3 -4
- intentkit/skills/acolyt/base.py +4 -9
- intentkit/skills/acolyt/schema.json +4 -3
- intentkit/skills/aixbt/__init__.py +2 -13
- intentkit/skills/aixbt/base.py +1 -7
- intentkit/skills/aixbt/projects.py +14 -15
- intentkit/skills/aixbt/schema.json +4 -4
- intentkit/skills/allora/__init__.py +2 -9
- intentkit/skills/allora/base.py +4 -9
- intentkit/skills/allora/price.py +3 -4
- intentkit/skills/allora/schema.json +3 -2
- intentkit/skills/base.py +241 -41
- intentkit/skills/basename/__init__.py +51 -0
- intentkit/skills/basename/base.py +11 -0
- intentkit/skills/basename/basename.svg +11 -0
- intentkit/skills/basename/schema.json +58 -0
- intentkit/skills/carv/__init__.py +115 -121
- intentkit/skills/carv/base.py +184 -185
- intentkit/skills/carv/fetch_news.py +3 -3
- intentkit/skills/carv/onchain_query.py +4 -4
- intentkit/skills/carv/schema.json +134 -137
- intentkit/skills/carv/token_info_and_price.py +6 -6
- intentkit/skills/casino/__init__.py +4 -15
- intentkit/skills/casino/base.py +1 -7
- intentkit/skills/casino/deck_draw.py +5 -8
- intentkit/skills/casino/deck_shuffle.py +6 -6
- intentkit/skills/casino/dice_roll.py +2 -4
- intentkit/skills/casino/schema.json +0 -1
- intentkit/skills/cdp/__init__.py +22 -84
- intentkit/skills/cdp/base.py +1 -7
- intentkit/skills/cdp/schema.json +11 -314
- intentkit/skills/chainlist/__init__.py +2 -7
- intentkit/skills/chainlist/base.py +1 -7
- intentkit/skills/chainlist/chain_lookup.py +18 -18
- intentkit/skills/chainlist/schema.json +3 -5
- intentkit/skills/common/__init__.py +2 -9
- intentkit/skills/common/base.py +1 -7
- intentkit/skills/common/current_time.py +1 -2
- intentkit/skills/common/schema.json +2 -2
- intentkit/skills/cookiefun/__init__.py +6 -9
- intentkit/skills/cookiefun/base.py +2 -7
- intentkit/skills/cookiefun/get_account_details.py +7 -7
- intentkit/skills/cookiefun/get_account_feed.py +19 -19
- intentkit/skills/cookiefun/get_account_smart_followers.py +7 -7
- intentkit/skills/cookiefun/get_sectors.py +3 -3
- intentkit/skills/cookiefun/schema.json +1 -3
- intentkit/skills/cookiefun/search_accounts.py +9 -9
- intentkit/skills/cryptocompare/__init__.py +7 -24
- intentkit/skills/cryptocompare/api.py +2 -3
- intentkit/skills/cryptocompare/base.py +10 -24
- intentkit/skills/cryptocompare/fetch_news.py +4 -5
- intentkit/skills/cryptocompare/fetch_price.py +6 -7
- intentkit/skills/cryptocompare/fetch_top_exchanges.py +4 -5
- intentkit/skills/cryptocompare/fetch_top_market_cap.py +4 -5
- intentkit/skills/cryptocompare/fetch_top_volume.py +4 -5
- intentkit/skills/cryptocompare/fetch_trading_signals.py +5 -6
- intentkit/skills/cryptocompare/schema.json +3 -3
- intentkit/skills/cryptopanic/__init__.py +7 -10
- intentkit/skills/cryptopanic/base.py +51 -55
- intentkit/skills/cryptopanic/fetch_crypto_news.py +4 -8
- intentkit/skills/cryptopanic/fetch_crypto_sentiment.py +5 -7
- intentkit/skills/cryptopanic/schema.json +105 -103
- intentkit/skills/dapplooker/__init__.py +2 -9
- intentkit/skills/dapplooker/base.py +4 -9
- intentkit/skills/dapplooker/dapplooker_token_data.py +7 -7
- intentkit/skills/dapplooker/schema.json +3 -5
- intentkit/skills/defillama/__init__.py +24 -74
- intentkit/skills/defillama/api.py +6 -9
- intentkit/skills/defillama/base.py +8 -19
- intentkit/skills/defillama/coins/fetch_batch_historical_prices.py +8 -10
- intentkit/skills/defillama/coins/fetch_block.py +6 -8
- intentkit/skills/defillama/coins/fetch_current_prices.py +8 -10
- intentkit/skills/defillama/coins/fetch_first_price.py +7 -9
- intentkit/skills/defillama/coins/fetch_historical_prices.py +9 -11
- intentkit/skills/defillama/coins/fetch_price_chart.py +9 -11
- intentkit/skills/defillama/coins/fetch_price_percentage.py +7 -9
- intentkit/skills/defillama/config/chains.py +1 -3
- intentkit/skills/defillama/fees/fetch_fees_overview.py +24 -26
- intentkit/skills/defillama/schema.json +5 -1
- intentkit/skills/defillama/stablecoins/fetch_stablecoin_chains.py +16 -18
- intentkit/skills/defillama/stablecoins/fetch_stablecoin_charts.py +8 -10
- intentkit/skills/defillama/stablecoins/fetch_stablecoin_prices.py +5 -7
- intentkit/skills/defillama/stablecoins/fetch_stablecoins.py +7 -9
- intentkit/skills/defillama/tests/api_integration.test.py +1 -1
- intentkit/skills/defillama/tvl/fetch_chain_historical_tvl.py +4 -6
- intentkit/skills/defillama/tvl/fetch_chains.py +9 -11
- intentkit/skills/defillama/tvl/fetch_historical_tvl.py +4 -6
- intentkit/skills/defillama/tvl/fetch_protocol.py +32 -38
- intentkit/skills/defillama/tvl/fetch_protocol_current_tvl.py +3 -5
- intentkit/skills/defillama/tvl/fetch_protocols.py +37 -45
- intentkit/skills/defillama/volumes/fetch_dex_overview.py +42 -48
- intentkit/skills/defillama/volumes/fetch_dex_summary.py +35 -37
- intentkit/skills/defillama/volumes/fetch_options_overview.py +24 -28
- intentkit/skills/defillama/yields/fetch_pool_chart.py +10 -12
- intentkit/skills/defillama/yields/fetch_pools.py +26 -30
- intentkit/skills/dexscreener/__init__.py +97 -102
- intentkit/skills/dexscreener/base.py +125 -130
- intentkit/skills/dexscreener/get_pair_info.py +4 -5
- intentkit/skills/dexscreener/get_token_pairs.py +4 -5
- intentkit/skills/dexscreener/get_tokens_info.py +7 -8
- intentkit/skills/dexscreener/model/search_token_response.py +80 -82
- intentkit/skills/dexscreener/schema.json +91 -93
- intentkit/skills/dexscreener/search_token.py +182 -184
- intentkit/skills/dexscreener/utils.py +15 -14
- intentkit/skills/dune_analytics/__init__.py +7 -9
- intentkit/skills/dune_analytics/base.py +48 -52
- intentkit/skills/dune_analytics/fetch_kol_buys.py +5 -7
- intentkit/skills/dune_analytics/fetch_nation_metrics.py +6 -8
- intentkit/skills/dune_analytics/schema.json +104 -99
- intentkit/skills/elfa/__init__.py +5 -18
- intentkit/skills/elfa/base.py +10 -14
- intentkit/skills/elfa/mention.py +19 -21
- intentkit/skills/elfa/schema.json +3 -2
- intentkit/skills/elfa/stats.py +4 -4
- intentkit/skills/elfa/tokens.py +12 -12
- intentkit/skills/elfa/utils.py +26 -28
- intentkit/skills/enso/__init__.py +11 -31
- intentkit/skills/enso/base.py +54 -35
- intentkit/skills/enso/best_yield.py +16 -24
- intentkit/skills/enso/networks.py +6 -11
- intentkit/skills/enso/prices.py +11 -13
- intentkit/skills/enso/route.py +34 -38
- intentkit/skills/enso/schema.json +3 -2
- intentkit/skills/enso/tokens.py +29 -38
- intentkit/skills/enso/wallet.py +76 -191
- intentkit/skills/erc20/__init__.py +50 -0
- intentkit/skills/erc20/base.py +11 -0
- intentkit/skills/erc20/erc20.svg +5 -0
- intentkit/skills/erc20/schema.json +74 -0
- intentkit/skills/erc721/__init__.py +53 -0
- intentkit/skills/erc721/base.py +11 -0
- intentkit/skills/erc721/erc721.svg +5 -0
- intentkit/skills/erc721/schema.json +90 -0
- intentkit/skills/firecrawl/__init__.py +5 -18
- intentkit/skills/firecrawl/base.py +4 -9
- intentkit/skills/firecrawl/clear.py +4 -8
- intentkit/skills/firecrawl/crawl.py +19 -19
- intentkit/skills/firecrawl/query.py +4 -3
- intentkit/skills/firecrawl/schema.json +2 -6
- intentkit/skills/firecrawl/scrape.py +17 -22
- intentkit/skills/firecrawl/utils.py +50 -42
- intentkit/skills/github/__init__.py +2 -7
- intentkit/skills/github/base.py +1 -7
- intentkit/skills/github/github_search.py +1 -2
- intentkit/skills/github/schema.json +3 -4
- intentkit/skills/heurist/__init__.py +8 -27
- intentkit/skills/heurist/base.py +4 -9
- intentkit/skills/heurist/image_generation_animagine_xl.py +13 -15
- intentkit/skills/heurist/image_generation_arthemy_comics.py +13 -15
- intentkit/skills/heurist/image_generation_arthemy_real.py +13 -15
- intentkit/skills/heurist/image_generation_braindance.py +13 -15
- intentkit/skills/heurist/image_generation_cyber_realistic_xl.py +13 -15
- intentkit/skills/heurist/image_generation_flux_1_dev.py +13 -15
- intentkit/skills/heurist/image_generation_sdxl.py +13 -15
- intentkit/skills/heurist/schema.json +2 -2
- intentkit/skills/http/__init__.py +4 -15
- intentkit/skills/http/base.py +1 -7
- intentkit/skills/http/get.py +21 -16
- intentkit/skills/http/post.py +23 -18
- intentkit/skills/http/put.py +23 -18
- intentkit/skills/http/schema.json +4 -5
- intentkit/skills/lifi/__init__.py +8 -13
- intentkit/skills/lifi/base.py +3 -9
- intentkit/skills/lifi/schema.json +17 -8
- intentkit/skills/lifi/token_execute.py +150 -60
- intentkit/skills/lifi/token_quote.py +8 -10
- intentkit/skills/lifi/utils.py +104 -51
- intentkit/skills/moralis/__init__.py +6 -10
- intentkit/skills/moralis/api.py +6 -7
- intentkit/skills/moralis/base.py +5 -10
- intentkit/skills/moralis/fetch_chain_portfolio.py +10 -11
- intentkit/skills/moralis/fetch_nft_portfolio.py +22 -22
- intentkit/skills/moralis/fetch_solana_portfolio.py +11 -12
- intentkit/skills/moralis/fetch_wallet_portfolio.py +8 -9
- intentkit/skills/moralis/schema.json +7 -2
- intentkit/skills/morpho/__init__.py +52 -0
- intentkit/skills/morpho/base.py +11 -0
- intentkit/skills/morpho/morpho.svg +12 -0
- intentkit/skills/morpho/schema.json +73 -0
- intentkit/skills/nation/__init__.py +4 -9
- intentkit/skills/nation/base.py +5 -10
- intentkit/skills/nation/nft_check.py +3 -4
- intentkit/skills/nation/schema.json +4 -3
- intentkit/skills/onchain.py +30 -0
- intentkit/skills/openai/__init__.py +17 -18
- intentkit/skills/openai/base.py +10 -14
- intentkit/skills/openai/dalle_image_generation.py +4 -9
- intentkit/skills/openai/gpt_avatar_generator.py +102 -0
- intentkit/skills/openai/gpt_image_generation.py +5 -9
- intentkit/skills/openai/gpt_image_mini_generator.py +92 -0
- intentkit/skills/openai/gpt_image_to_image.py +5 -9
- intentkit/skills/openai/image_to_text.py +3 -7
- intentkit/skills/openai/schema.json +34 -3
- intentkit/skills/portfolio/__init__.py +11 -35
- intentkit/skills/portfolio/base.py +33 -19
- intentkit/skills/portfolio/schema.json +3 -5
- intentkit/skills/portfolio/token_balances.py +21 -21
- intentkit/skills/portfolio/wallet_approvals.py +17 -18
- intentkit/skills/portfolio/wallet_defi_positions.py +3 -3
- intentkit/skills/portfolio/wallet_history.py +31 -31
- intentkit/skills/portfolio/wallet_net_worth.py +13 -13
- intentkit/skills/portfolio/wallet_nfts.py +19 -19
- intentkit/skills/portfolio/wallet_profitability.py +18 -18
- intentkit/skills/portfolio/wallet_profitability_summary.py +5 -5
- intentkit/skills/portfolio/wallet_stats.py +3 -3
- intentkit/skills/portfolio/wallet_swaps.py +19 -19
- intentkit/skills/pyth/__init__.py +50 -0
- intentkit/skills/pyth/base.py +11 -0
- intentkit/skills/pyth/pyth.svg +6 -0
- intentkit/skills/pyth/schema.json +75 -0
- intentkit/skills/skills.toml +36 -0
- intentkit/skills/slack/__init__.py +5 -17
- intentkit/skills/slack/base.py +3 -9
- intentkit/skills/slack/get_channel.py +8 -8
- intentkit/skills/slack/get_message.py +9 -9
- intentkit/skills/slack/schedule_message.py +5 -5
- intentkit/skills/slack/schema.json +2 -2
- intentkit/skills/slack/send_message.py +3 -5
- intentkit/skills/supabase/__init__.py +7 -23
- intentkit/skills/supabase/base.py +1 -7
- intentkit/skills/supabase/delete_data.py +4 -4
- intentkit/skills/supabase/fetch_data.py +12 -12
- intentkit/skills/supabase/insert_data.py +4 -4
- intentkit/skills/supabase/invoke_function.py +6 -6
- intentkit/skills/supabase/schema.json +2 -3
- intentkit/skills/supabase/update_data.py +6 -6
- intentkit/skills/supabase/upsert_data.py +4 -4
- intentkit/skills/superfluid/__init__.py +53 -0
- intentkit/skills/superfluid/base.py +11 -0
- intentkit/skills/superfluid/schema.json +89 -0
- intentkit/skills/superfluid/superfluid.svg +6 -0
- intentkit/skills/system/__init__.py +7 -24
- intentkit/skills/system/add_autonomous_task.py +10 -12
- intentkit/skills/system/delete_autonomous_task.py +2 -2
- intentkit/skills/system/edit_autonomous_task.py +14 -18
- intentkit/skills/system/list_autonomous_tasks.py +3 -5
- intentkit/skills/system/read_agent_api_key.py +6 -4
- intentkit/skills/system/regenerate_agent_api_key.py +6 -4
- intentkit/skills/system/schema.json +6 -8
- intentkit/skills/tavily/__init__.py +3 -12
- intentkit/skills/tavily/base.py +4 -9
- intentkit/skills/tavily/schema.json +3 -5
- intentkit/skills/tavily/tavily_extract.py +2 -4
- intentkit/skills/tavily/tavily_search.py +4 -6
- intentkit/skills/token/__init__.py +5 -10
- intentkit/skills/token/base.py +7 -11
- intentkit/skills/token/erc20_transfers.py +19 -19
- intentkit/skills/token/schema.json +3 -6
- intentkit/skills/token/token_analytics.py +3 -3
- intentkit/skills/token/token_price.py +13 -13
- intentkit/skills/token/token_search.py +9 -9
- intentkit/skills/twitter/__init__.py +11 -35
- intentkit/skills/twitter/base.py +22 -34
- intentkit/skills/twitter/follow_user.py +2 -6
- intentkit/skills/twitter/get_mentions.py +5 -12
- intentkit/skills/twitter/get_timeline.py +4 -12
- intentkit/skills/twitter/get_user_by_username.py +2 -6
- intentkit/skills/twitter/get_user_tweets.py +5 -13
- intentkit/skills/twitter/like_tweet.py +2 -6
- intentkit/skills/twitter/post_tweet.py +6 -9
- intentkit/skills/twitter/reply_tweet.py +6 -9
- intentkit/skills/twitter/retweet.py +2 -6
- intentkit/skills/twitter/schema.json +1 -0
- intentkit/skills/twitter/search_tweets.py +4 -12
- intentkit/skills/unrealspeech/__init__.py +2 -7
- intentkit/skills/unrealspeech/base.py +2 -8
- intentkit/skills/unrealspeech/schema.json +2 -5
- intentkit/skills/unrealspeech/text_to_speech.py +8 -8
- intentkit/skills/venice_audio/__init__.py +98 -106
- intentkit/skills/venice_audio/base.py +117 -121
- intentkit/skills/venice_audio/input.py +41 -41
- intentkit/skills/venice_audio/schema.json +151 -152
- intentkit/skills/venice_audio/venice_audio.py +38 -21
- intentkit/skills/venice_image/__init__.py +147 -154
- intentkit/skills/venice_image/api.py +138 -138
- intentkit/skills/venice_image/base.py +185 -192
- intentkit/skills/venice_image/config.py +33 -35
- intentkit/skills/venice_image/image_enhance/image_enhance.py +2 -3
- intentkit/skills/venice_image/image_enhance/image_enhance_base.py +21 -23
- intentkit/skills/venice_image/image_enhance/image_enhance_input.py +38 -40
- intentkit/skills/venice_image/image_generation/image_generation_base.py +11 -10
- intentkit/skills/venice_image/image_generation/image_generation_fluently_xl.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_flux_dev.py +27 -27
- intentkit/skills/venice_image/image_generation/image_generation_flux_dev_uncensored.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_input.py +158 -158
- intentkit/skills/venice_image/image_generation/image_generation_lustify_sdxl.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_pony_realism.py +26 -26
- intentkit/skills/venice_image/image_generation/image_generation_stable_diffusion_3_5.py +28 -28
- intentkit/skills/venice_image/image_generation/image_generation_venice_sd35.py +28 -28
- intentkit/skills/venice_image/image_upscale/image_upscale.py +3 -3
- intentkit/skills/venice_image/image_upscale/image_upscale_base.py +21 -23
- intentkit/skills/venice_image/image_upscale/image_upscale_input.py +22 -22
- intentkit/skills/venice_image/image_vision/image_vision.py +2 -2
- intentkit/skills/venice_image/image_vision/image_vision_base.py +17 -17
- intentkit/skills/venice_image/image_vision/image_vision_input.py +9 -9
- intentkit/skills/venice_image/schema.json +267 -267
- intentkit/skills/venice_image/utils.py +77 -78
- intentkit/skills/web_scraper/__init__.py +5 -18
- intentkit/skills/web_scraper/base.py +21 -7
- intentkit/skills/web_scraper/document_indexer.py +7 -6
- intentkit/skills/web_scraper/schema.json +2 -6
- intentkit/skills/web_scraper/scrape_and_index.py +15 -15
- intentkit/skills/web_scraper/utils.py +62 -63
- intentkit/skills/web_scraper/website_indexer.py +17 -19
- intentkit/skills/weth/__init__.py +49 -0
- intentkit/skills/weth/base.py +11 -0
- intentkit/skills/weth/schema.json +58 -0
- intentkit/skills/weth/weth.svg +6 -0
- intentkit/skills/wow/__init__.py +51 -0
- intentkit/skills/wow/base.py +11 -0
- intentkit/skills/wow/schema.json +89 -0
- intentkit/skills/wow/wow.svg +7 -0
- intentkit/skills/x402/__init__.py +58 -0
- intentkit/skills/x402/base.py +99 -0
- intentkit/skills/x402/http_request.py +117 -0
- intentkit/skills/x402/schema.json +40 -0
- intentkit/skills/x402/x402.webp +0 -0
- intentkit/skills/xmtp/__init__.py +4 -15
- intentkit/skills/xmtp/base.py +5 -5
- intentkit/skills/xmtp/price.py +7 -6
- intentkit/skills/xmtp/schema.json +69 -71
- intentkit/skills/xmtp/swap.py +6 -8
- intentkit/skills/xmtp/transfer.py +4 -6
- intentkit/utils/__init__.py +4 -0
- intentkit/utils/chain.py +198 -96
- intentkit/utils/ens.py +135 -0
- intentkit/utils/error.py +5 -2
- intentkit/utils/logging.py +9 -11
- intentkit/utils/schema.py +100 -0
- intentkit/utils/slack_alert.py +8 -8
- intentkit/utils/tx.py +16 -8
- intentkit/uv.lock +3377 -0
- {intentkit-0.7.5.dev3.dist-info → intentkit-0.8.34.dev7.dist-info}/METADATA +13 -15
- intentkit-0.8.34.dev7.dist-info/RECORD +478 -0
- intentkit-0.8.34.dev7.dist-info/licenses/LICENSE +21 -0
- intentkit/core/node.py +0 -215
- intentkit/models/conversation.py +0 -286
- intentkit/models/generator.py +0 -347
- intentkit/skills/cdp/get_balance.py +0 -110
- intentkit/skills/cdp/swap.py +0 -121
- intentkit/skills/moralis/tests/__init__.py +0 -0
- intentkit/skills/moralis/tests/test_wallet.py +0 -511
- intentkit-0.7.5.dev3.dist-info/RECORD +0 -424
- {intentkit-0.7.5.dev3.dist-info/licenses → intentkit}/LICENSE +0 -0
- {intentkit-0.7.5.dev3.dist-info → intentkit-0.8.34.dev7.dist-info}/WHEEL +0 -0
intentkit/models/llm.py
CHANGED
@@ -1,18 +1,24 @@
+import csv
 import json
 import logging
-from datetime import
+from datetime import UTC, datetime
 from decimal import ROUND_HALF_UP, Decimal
 from enum import Enum
-from
+from pathlib import Path
+from typing import Annotated, Any

+from langchain.chat_models.base import BaseChatModel
+from pydantic import BaseModel, ConfigDict, Field
+from sqlalchemy import Boolean, DateTime, Integer, Numeric, String, func, select
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import Mapped, mapped_column
+
+from intentkit.config.config import config
 from intentkit.models.app_setting import AppSetting
 from intentkit.models.base import Base
 from intentkit.models.db import get_session
 from intentkit.models.redis import get_redis
-from intentkit.utils.error import
-from langchain_core.language_models import LanguageModelLike
-from pydantic import BaseModel, ConfigDict, Field
-from sqlalchemy import Boolean, Column, DateTime, Integer, Numeric, String, func, select
+from intentkit.utils.error import IntentKitAPIError

 logger = logging.getLogger(__name__)

@@ -20,13 +26,85 @@ _credit_per_usdc = None
 FOURPLACES = Decimal("0.0001")


+def _parse_bool(value: str | None) -> bool:
+    if value is None:
+        return False
+    return value.strip().lower() in {"true", "1", "yes"}
+
+
+def _parse_optional_int(value: str | None) -> int | None:
+    if value is None:
+        return None
+    value = value.strip()
+    return int(value) if value else None
+
+
+def _load_default_llm_models() -> dict[str, "LLMModelInfo"]:
+    """Load default LLM models from a CSV file."""
+
+    path = Path(__file__).with_name("llm.csv")
+    if not path.exists():
+        logger.warning("Default LLM CSV not found at %s", path)
+        return {}
+
+    defaults: dict[str, LLMModelInfo] = {}
+    with path.open(newline="", encoding="utf-8") as csvfile:
+        reader = csv.DictReader(csvfile)
+        for row in reader:
+            try:
+                timestamp = datetime.now(UTC)
+                model = LLMModelInfo(
+                    id=row["id"],
+                    name=row["name"],
+                    provider=LLMProvider(row["provider"]),
+                    enabled=_parse_bool(row.get("enabled")),
+                    input_price=Decimal(row["input_price"]),
+                    output_price=Decimal(row["output_price"]),
+                    price_level=_parse_optional_int(row.get("price_level")),
+                    context_length=int(row["context_length"]),
+                    output_length=int(row["output_length"]),
+                    intelligence=int(row["intelligence"]),
+                    speed=int(row["speed"]),
+                    supports_image_input=_parse_bool(row.get("supports_image_input")),
+                    supports_skill_calls=_parse_bool(row.get("supports_skill_calls")),
+                    supports_structured_output=_parse_bool(
+                        row.get("supports_structured_output")
+                    ),
+                    has_reasoning=_parse_bool(row.get("has_reasoning")),
+                    supports_search=_parse_bool(row.get("supports_search")),
+                    supports_temperature=_parse_bool(row.get("supports_temperature")),
+                    supports_frequency_penalty=_parse_bool(
+                        row.get("supports_frequency_penalty")
+                    ),
+                    supports_presence_penalty=_parse_bool(
+                        row.get("supports_presence_penalty")
+                    ),
+                    api_base=row.get("api_base", "").strip() or None,
+                    timeout=int(row.get("timeout", "") or 180),
+                    created_at=timestamp,
+                    updated_at=timestamp,
+                )
+                if not model.enabled:
+                    continue
+            except Exception as exc:
+                logger.error(
+                    "Failed to load default LLM model %s: %s", row.get("id"), exc
+                )
+                continue
+            defaults[model.id] = model
+
+    return defaults
+
+
 class LLMProvider(str, Enum):
     OPENAI = "openai"
     DEEPSEEK = "deepseek"
     XAI = "xai"
+    GATEWAYZ = "gatewayz"
     ETERNAL = "eternal"
     REIGENT = "reigent"
     VENICE = "venice"
+    OLLAMA = "ollama"

     def display_name(self) -> str:
         """Return user-friendly display name for the provider."""
@@ -34,9 +112,11 @@ class LLMProvider(str, Enum):
             self.OPENAI: "OpenAI",
             self.DEEPSEEK: "DeepSeek",
             self.XAI: "xAI",
-            self.
-            self.
-            self.
+            self.GATEWAYZ: "Gatewayz",
+            self.ETERNAL: "Eternal",
+            self.REIGENT: "Reigent",
+            self.VENICE: "Venice",
+            self.OLLAMA: "Ollama",
         }
         return display_names.get(self, self.value)

@@ -46,41 +126,63 @@ class LLMModelInfoTable(Base):

     __tablename__ = "llm_models"

-    id =
-    name =
-    provider =
-    enabled =
-    input_price =
+    id: Mapped[str] = mapped_column(String, primary_key=True)
+    name: Mapped[str] = mapped_column(String, nullable=False)
+    provider: Mapped[str] = mapped_column(String, nullable=False)  # Stored as enum
+    enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True)
+    input_price: Mapped[Decimal] = mapped_column(
         Numeric(22, 4), nullable=False
     )  # Price per 1M input tokens in USD
-    output_price =
+    output_price: Mapped[Decimal] = mapped_column(
         Numeric(22, 4), nullable=False
     )  # Price per 1M output tokens in USD
-    price_level
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    price_level: Mapped[int | None] = mapped_column(
+        Integer, nullable=True
+    )  # Price level rating
+    context_length: Mapped[int] = mapped_column(
+        Integer, nullable=False
+    )  # Context length
+    output_length: Mapped[int] = mapped_column(Integer, nullable=False)  # Output length
+    intelligence: Mapped[int] = mapped_column(
+        Integer, nullable=False
+    )  # Intelligence rating
+    speed: Mapped[int] = mapped_column(Integer, nullable=False)  # Speed rating
+    supports_image_input: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=False
+    )
+    supports_skill_calls: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=False
+    )
+    supports_structured_output: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=False
+    )
+    has_reasoning: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
+    supports_search: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=False
+    )
+    supports_temperature: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=True
+    )
+    supports_frequency_penalty: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=True
+    )
+    supports_presence_penalty: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, default=True
+    )
+    api_base: Mapped[str | None] = mapped_column(String, nullable=True)
+    timeout: Mapped[int] = mapped_column(
+        Integer, nullable=False, default=180
+    )  # Timeout seconds
+    created_at: Mapped[datetime] = mapped_column(
         DateTime(timezone=True),
         nullable=False,
         server_default=func.now(),
     )
-    updated_at =
+    updated_at: Mapped[datetime] = mapped_column(
         DateTime(timezone=True),
         nullable=False,
         server_default=func.now(),
-        onupdate=lambda: datetime.now(
+        onupdate=lambda: datetime.now(UTC),
     )


@@ -99,7 +201,7 @@ class LLMModelInfo(BaseModel):
     enabled: bool = Field(default=True)
     input_price: Decimal  # Price per 1M input tokens in USD
     output_price: Decimal  # Price per 1M output tokens in USD
-    price_level:
+    price_level: int | None = Field(
         default=None, ge=1, le=5
     )  # Price level rating from 1-5
     context_length: int  # Maximum context length in tokens
@@ -124,22 +226,20 @@ class LLMModelInfo(BaseModel):
     supports_presence_penalty: bool = (
         True  # Whether the model supports presence_penalty parameter
     )
-    api_base:
-        None  # Custom API base URL if not using provider's default
-    )
+    api_base: str | None = None  # Custom API base URL if not using provider's default
     timeout: int = 180  # Default timeout in seconds
     created_at: Annotated[
         datetime,
         Field(
             description="Timestamp when this data was created",
-            default=datetime.now(
+            default=datetime.now(UTC),
         ),
     ]
     updated_at: Annotated[
         datetime,
         Field(
             description="Timestamp when this data was updated",
-            default=datetime.now(
+            default=datetime.now(UTC),
         ),
     ]

@@ -208,7 +308,31 @@ class LLMModelInfo(BaseModel):
             return model_info

         # Not found anywhere
-        raise
+        raise IntentKitAPIError(
+            400,
+            "ModelNotFound",
+            f"Model {model_id} not found, maybe deprecated, please change it in the agent configuration.",
+        )
+
+    @classmethod
+    async def get_all(cls, session: AsyncSession | None = None) -> list["LLMModelInfo"]:
+        """Return all models merged from defaults and database overrides."""
+
+        if session is None:
+            async with get_session() as db:
+                return await cls.get_all(session=db)
+
+        models: dict[str, LLMModelInfo] = {
+            model_id: model.model_copy(deep=True)
+            for model_id, model in AVAILABLE_MODELS.items()
+        }
+
+        result = await session.execute(select(LLMModelInfoTable))
+        for row in result.scalars():
+            model_info = cls.model_validate(row)
+            models[model_info.id] = model_info
+
+        return list(models.values())

     async def calculate_cost(self, input_tokens: int, output_tokens: int) -> Decimal:
         global _credit_per_usdc
@@ -230,325 +354,8 @@ class LLMModelInfo(BaseModel):
         return (input_cost + output_cost).quantize(FOURPLACES, rounding=ROUND_HALF_UP)


-#
-AVAILABLE_MODELS =
-    # OpenAI models
-    "gpt-4o": LLMModelInfo(
-        id="gpt-4o",
-        name="GPT-4o",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("2.50"),  # per 1M input tokens
-        output_price=Decimal("10.00"),  # per 1M output tokens
-        context_length=128000,
-        output_length=4096,
-        intelligence=4,
-        speed=3,
-        supports_image_input=True,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-4o-mini": LLMModelInfo(
-        id="gpt-4o-mini",
-        name="GPT-4o Mini",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("0.15"),  # per 1M input tokens
-        output_price=Decimal("0.60"),  # per 1M output tokens
-        context_length=128000,
-        output_length=4096,
-        intelligence=3,
-        speed=4,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-5-nano": LLMModelInfo(
-        id="gpt-5-nano",
-        name="GPT-5 Nano",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("0.05"),  # per 1M input tokens
-        output_price=Decimal("0.4"),  # per 1M output tokens
-        context_length=400000,
-        output_length=128000,
-        intelligence=3,
-        speed=5,
-        supports_image_input=True,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_temperature=False,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-5-mini": LLMModelInfo(
-        id="gpt-5-mini",
-        name="GPT-5 Mini",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("0.25"),  # per 1M input tokens
-        output_price=Decimal("2"),  # per 1M output tokens
-        context_length=400000,
-        output_length=128000,
-        intelligence=4,
-        speed=4,
-        supports_image_input=True,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        supports_temperature=False,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-5": LLMModelInfo(
-        id="gpt-5",
-        name="GPT-5",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("1.25"),  # per 1M input tokens
-        output_price=Decimal("10.00"),  # per 1M output tokens
-        context_length=400000,
-        output_length=128000,
-        intelligence=5,
-        speed=3,
-        supports_image_input=True,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        supports_temperature=False,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-4.1-nano": LLMModelInfo(
-        id="gpt-4.1-nano",
-        name="GPT-4.1 Nano",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("0.1"),  # per 1M input tokens
-        output_price=Decimal("0.4"),  # per 1M output tokens
-        context_length=128000,
-        output_length=4096,
-        intelligence=3,
-        speed=5,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-4.1-mini": LLMModelInfo(
-        id="gpt-4.1-mini",
-        name="GPT-4.1 Mini",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("0.4"),  # per 1M input tokens
-        output_price=Decimal("1.6"),  # per 1M output tokens
-        context_length=128000,
-        output_length=4096,
-        intelligence=4,
-        speed=4,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "gpt-4.1": LLMModelInfo(
-        id="gpt-4.1",
-        name="GPT-4.1",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("2.00"),  # per 1M input tokens
-        output_price=Decimal("8.00"),  # per 1M output tokens
-        context_length=128000,
-        output_length=4096,
-        intelligence=5,
-        speed=3,
-        supports_image_input=True,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    "o4-mini": LLMModelInfo(
-        id="o4-mini",
-        name="OpenAI o4-mini",
-        provider=LLMProvider.OPENAI,
-        input_price=Decimal("1.10"),  # per 1M input tokens
-        output_price=Decimal("4.40"),  # per 1M output tokens
-        context_length=128000,
-        output_length=4096,
-        intelligence=4,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        has_reasoning=True,  # Has strong reasoning capabilities
-        supports_temperature=False,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-    ),
-    # Deepseek models
-    "deepseek-chat": LLMModelInfo(
-        id="deepseek-chat",
-        name="Deepseek V3 (0324)",
-        provider=LLMProvider.DEEPSEEK,
-        input_price=Decimal("0.27"),
-        output_price=Decimal("1.10"),
-        context_length=60000,
-        output_length=4096,
-        intelligence=4,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        api_base="https://api.deepseek.com",
-        timeout=300,
-    ),
-    "deepseek-reasoner": LLMModelInfo(
-        id="deepseek-reasoner",
-        name="Deepseek R1",
-        provider=LLMProvider.DEEPSEEK,
-        input_price=Decimal("0.55"),
-        output_price=Decimal("2.19"),
-        context_length=60000,
-        output_length=4096,
-        intelligence=4,
-        speed=2,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        has_reasoning=True,  # Has strong reasoning capabilities
-        api_base="https://api.deepseek.com",
-        timeout=300,
-    ),
-    # XAI models
-    "grok-2": LLMModelInfo(
-        id="grok-2",
-        name="Grok 2",
-        provider=LLMProvider.XAI,
-        input_price=Decimal("2"),
-        output_price=Decimal("10"),
-        context_length=120000,
-        output_length=4096,
-        intelligence=3,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        timeout=180,
-    ),
-    "grok-3": LLMModelInfo(
-        id="grok-3",
-        name="Grok 3",
-        provider=LLMProvider.XAI,
-        input_price=Decimal("3"),
-        output_price=Decimal("15"),
-        context_length=131072,
-        output_length=4096,
-        intelligence=5,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_search=True,
-        timeout=180,
-    ),
-    "grok-3-mini": LLMModelInfo(
-        id="grok-3-mini",
-        name="Grok 3 Mini",
-        provider=LLMProvider.XAI,
-        input_price=Decimal("0.3"),
-        output_price=Decimal("0.5"),
-        context_length=131072,
-        output_length=4096,
-        intelligence=5,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        has_reasoning=True,  # Has strong reasoning capabilities
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,  # Grok-3-mini doesn't support presence_penalty
-        timeout=180,
-    ),
-    # Eternal AI models
-    "eternalai": LLMModelInfo(
-        id="eternalai",
-        name="Eternal AI (Llama-3.3-70B)",
-        provider=LLMProvider.ETERNAL,
-        input_price=Decimal("0.25"),
-        output_price=Decimal("0.75"),
-        context_length=60000,
-        output_length=4096,
-        intelligence=4,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        api_base="https://api.eternalai.org/v1",
-        timeout=300,
-    ),
-    # Reigent models
-    "reigent": LLMModelInfo(
-        id="reigent",
-        name="REI Network",
-        provider=LLMProvider.REIGENT,
-        input_price=Decimal("0.50"),  # Placeholder price, update with actual pricing
-        output_price=Decimal("1.50"),  # Placeholder price, update with actual pricing
-        context_length=32000,
-        output_length=4096,
-        intelligence=4,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_temperature=False,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-        api_base="https://api.reisearch.box/v1",
-        timeout=300,
-    ),
-    # Venice models
-    "venice-uncensored": LLMModelInfo(
-        id="venice-uncensored",
-        name="Venice Uncensored",
-        provider=LLMProvider.VENICE,
-        input_price=Decimal("0.50"),  # Placeholder price, update with actual pricing
-        output_price=Decimal("2.00"),  # Placeholder price, update with actual pricing
-        context_length=32000,
-        output_length=4096,
-        intelligence=3,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_temperature=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-        api_base="https://api.venice.ai/api/v1",
-        timeout=300,
-    ),
-    "venice-llama-4-maverick-17b": LLMModelInfo(
-        id="venice-llama-4-maverick-17b",
-        name="Venice Llama-4 Maverick 17B",
-        provider=LLMProvider.VENICE,
-        input_price=Decimal("1.50"),
-        output_price=Decimal("6.00"),
-        context_length=32000,
-        output_length=4096,
-        intelligence=3,
-        speed=3,
-        supports_image_input=False,
-        supports_skill_calls=True,
-        supports_structured_output=True,
-        supports_temperature=True,
-        supports_frequency_penalty=False,
-        supports_presence_penalty=False,
-        api_base="https://api.venice.ai/api/v1",
-        timeout=300,
-    ),
-}
+# Default models loaded from CSV
+AVAILABLE_MODELS = _load_default_llm_models()


 class LLMModel(BaseModel):
@@ -563,14 +370,14 @@ class LLMModel(BaseModel):
     async def model_info(self) -> LLMModelInfo:
         """Get the model information with caching.

-        First tries to get from cache, then database, then
+        First tries to get from cache, then database, then default models loaded from CSV.
         Raises ValueError if model is not found anywhere.
         """
         model_info = await LLMModelInfo.get(self.model_name)
         return model_info

     # This will be implemented by subclasses to return the appropriate LLM instance
-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return the LLM instance based on the configuration."""
         raise NotImplementedError("Subclasses must implement create_instance")

@@ -588,7 +395,7 @@ class LLMModel(BaseModel):
 class OpenAILLM(LLMModel):
     """OpenAI LLM configuration."""

-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return a ChatOpenAI instance."""
         from langchain_openai import ChatOpenAI

@@ -598,6 +405,7 @@ class OpenAILLM(LLMModel):
             "model_name": self.model_name,
             "openai_api_key": config.openai_api_key,
             "timeout": info.timeout,
+            "use_responses_api": True,
         }

         # Add optional parameters based on model support
@@ -613,8 +421,13 @@ class OpenAILLM(LLMModel):
         if info.api_base:
             kwargs["openai_api_base"] = info.api_base

-        if self.model_name.
+        if self.model_name == "gpt-5-mini" or self.model_name == "gpt-5-nano":
             kwargs["reasoning_effort"] = "minimal"
+        elif self.model_name == "gpt-5.1-codex":
+            kwargs["reasoning_effort"] = "high"
+
+        # Update kwargs with params to allow overriding
+        kwargs.update(params)

         logger.debug(f"Creating ChatOpenAI instance with kwargs: {kwargs}")

@@ -624,7 +437,7 @@ class OpenAILLM(LLMModel):
 class DeepseekLLM(LLMModel):
     """Deepseek LLM configuration."""

-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return a ChatDeepseek instance."""

         from langchain_deepseek import ChatDeepSeek
@@ -651,13 +464,16 @@ class DeepseekLLM(LLMModel):
         if info.api_base:
             kwargs["api_base"] = info.api_base

+        # Update kwargs with params to allow overriding
+        kwargs.update(params)
+
         return ChatDeepSeek(**kwargs)


 class XAILLM(LLMModel):
     """XAI (Grok) LLM configuration."""

-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return a ChatXAI instance."""

         from langchain_xai import ChatXAI
@@ -680,16 +496,49 @@ class XAILLM(LLMModel):
         if info.supports_presence_penalty:
             kwargs["presence_penalty"] = self.presence_penalty

-
-
+        # Update kwargs with params to allow overriding
+        kwargs.update(params)

         return ChatXAI(**kwargs)


+class GatewayzLLM(LLMModel):
+    """Gatewayz AI LLM configuration."""
+
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
+        """Create and return a ChatOpenAI instance configured for Eternal AI."""
+        from langchain_openai import ChatOpenAI
+
+        info = await self.model_info()
+
+        kwargs = {
+            "model": self.model_name,
+            "api_key": config.gatewayz_api_key,
+            "base_url": info.api_base,
+            "timeout": info.timeout,
+            "max_completion_tokens": 999,
+        }
+
+        # Add optional parameters based on model support
+        if info.supports_temperature:
+            kwargs["temperature"] = self.temperature
+
+        if info.supports_frequency_penalty:
+            kwargs["frequency_penalty"] = self.frequency_penalty
+
+        if info.supports_presence_penalty:
+            kwargs["presence_penalty"] = self.presence_penalty
+
+        # Update kwargs with params to allow overriding
+        kwargs.update(params)
+
+        return ChatOpenAI(**kwargs)
+
+
 class EternalLLM(LLMModel):
     """Eternal AI LLM configuration."""

-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return a ChatOpenAI instance configured for Eternal AI."""
         from langchain_openai import ChatOpenAI

@@ -715,13 +564,16 @@ class EternalLLM(LLMModel):
         if info.supports_presence_penalty:
             kwargs["presence_penalty"] = self.presence_penalty

+        # Update kwargs with params to allow overriding
+        kwargs.update(params)
+
         return ChatOpenAI(**kwargs)


 class ReigentLLM(LLMModel):
     """Reigent LLM configuration."""

-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return a ChatOpenAI instance configured for Reigent."""
         from langchain_openai import ChatOpenAI

@@ -737,13 +589,16 @@ class ReigentLLM(LLMModel):
             },
         }

+        # Update kwargs with params to allow overriding
+        kwargs.update(params)
+
         return ChatOpenAI(**kwargs)


 class VeniceLLM(LLMModel):
     """Venice LLM configuration."""

-    async def create_instance(self,
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
         """Create and return a ChatOpenAI instance configured for Venice."""
         from langchain_openai import ChatOpenAI

@@ -755,9 +610,42 @@ class VeniceLLM(LLMModel):
             "timeout": info.timeout,
         }

+        # Update kwargs with params to allow overriding
+        kwargs.update(params)
+
         return ChatOpenAI(**kwargs)


+# Factory function to create the appropriate LLM model based on the model name
+class OllamaLLM(LLMModel):
+    """Ollama LLM configuration."""
+
+    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
+        """Create and return a ChatOllama instance."""
+        from langchain_ollama import ChatOllama
+
+        info = await self.model_info()
+
+        kwargs = {
+            "model": self.model_name,
+            "base_url": info.api_base or "http://localhost:11434",
+            "temperature": self.temperature,
+            # Ollama specific parameters
+            "keep_alive": -1,  # Keep the model loaded indefinitely
+        }
+
+        if info.supports_frequency_penalty:
+            kwargs["frequency_penalty"] = self.frequency_penalty
+
+        if info.supports_presence_penalty:
+            kwargs["presence_penalty"] = self.presence_penalty
+
+        # Update kwargs with params to allow overriding
+        kwargs.update(params)
+
+        return ChatOllama(**kwargs)
+
+
 # Factory function to create the appropriate LLM model based on the model name
 async def create_llm_model(
     model_name: str,
@@ -799,6 +687,10 @@ async def create_llm_model(
         return ReigentLLM(**base_params)
     elif provider == LLMProvider.VENICE:
         return VeniceLLM(**base_params)
+    elif provider == LLMProvider.GATEWAYZ:
+        return GatewayzLLM(**base_params)
+    elif provider == LLMProvider.OLLAMA:
+        return OllamaLLM(**base_params)
     else:
         # Default to OpenAI
         return OpenAILLM(**base_params)
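Every `create_instance` override in the new code merges the caller-supplied `params` dict into the constructor kwargs last (`kwargs.update(params)`), so per-call values win over both the CSV defaults and any `llm_models` row; the expected `llm.csv` columns mirror the keyword arguments read in `_load_default_llm_models`. Below is a minimal usage sketch of that override hook; it is not taken from the package, and `llm` is assumed to be one of the `LLMModel` subclasses obtained elsewhere (for example via `create_llm_model`, whose full signature is not shown in this diff).

```python
# Hypothetical sketch, not from the package: `llm` stands for an OpenAILLM,
# XAILLM, OllamaLLM, etc. instance built elsewhere (e.g. by create_llm_model).
# Because each subclass calls kwargs.update(params) right before constructing
# the chat model, values passed here override the CSV/database defaults.
async def build_deterministic_chat_model(llm):
    return await llm.create_instance({"temperature": 0.0, "timeout": 60})
```

The same override path exists for the Deepseek, XAI, Gatewayz, Eternal, Reigent, Venice, and Ollama subclasses shown in the diff.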