intentkit 0.6.21.dev2__py3-none-any.whl → 0.6.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of intentkit has been flagged as potentially problematic; consult the registry's advisory page for details.

@@ -0,0 +1,213 @@
1
+ import logging
2
+ from typing import Any, List, Type
3
+
4
+ from pydantic import BaseModel, Field, ValidationError, field_validator
5
+
6
+ from intentkit.skills.dexscreener.base import DexScreenerBaseTool
7
+ from intentkit.skills.dexscreener.model.search_token_response import (
8
+ SearchTokenResponseModel,
9
+ )
10
+ from intentkit.skills.dexscreener.utils import (
11
+ API_ENDPOINTS,
12
+ MAX_TOKENS_BATCH,
13
+ RATE_LIMITS,
14
+ create_error_response,
15
+ create_no_results_response,
16
+ format_success_response,
17
+ get_liquidity_value,
18
+ group_pairs_by_token,
19
+ handle_validation_error,
20
+ truncate_large_fields,
21
+ )
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
class GetTokensInfoInput(BaseModel):
    """Input schema for the DexScreener get_tokens_info tool."""

    chain_id: str = Field(
        description="The blockchain chain ID (e.g., 'ethereum', 'solana', 'bsc', 'polygon', 'arbitrum', 'base', 'avalanche')"
    )
    token_addresses: List[str] = Field(
        description=f"List of token contract addresses to retrieve info for (maximum {MAX_TOKENS_BATCH} addresses). "
        "Each address should be in the format '0x1234...abcd' for Ethereum-based chains."
    )

    @field_validator("token_addresses")
    @classmethod
    def validate_token_addresses(cls, v: List[str]) -> List[str]:
        """Require a non-empty list within the batch limit; drop duplicates, keeping order."""
        if not v:
            raise ValueError("At least one token address is required")
        if len(v) > MAX_TOKENS_BATCH:
            raise ValueError(f"Maximum {MAX_TOKENS_BATCH} token addresses allowed")
        # dict preserves insertion order (Python 3.7+), so this is an
        # order-preserving de-duplication of the address list.
        return list(dict.fromkeys(v))
52
+
53
+
54
class GetTokensInfo(DexScreenerBaseTool):
    """
    Tool to get detailed information for multiple tokens at once on DexScreener.
    """

    name: str = "dexscreener_get_tokens_info"
    description: str = (
        f"Retrieves detailed trading pair information for multiple tokens (up to {MAX_TOKENS_BATCH}) "
        "using chain ID and a list of token addresses. For each token, returns all available "
        "trading pairs with price, volume, liquidity, market data, and DEX information. "
        "This is more efficient than making individual calls when you need info for multiple tokens. "
        "Use this tool for portfolio analysis or comparing multiple tokens at once."
    )
    args_schema: Type[BaseModel] = GetTokensInfoInput

    async def _arun(
        self,
        chain_id: str,
        token_addresses: List[str],
        **kwargs: Any,
    ) -> str:
        """Fetch trading-pair info for each requested token and return a JSON string.

        Pairs are grouped per token address and sorted by liquidity (highest
        first) within each group. Errors are returned as JSON error payloads
        rather than raised.
        """

        # Throttle requests per user category before touching the API.
        await self.user_rate_limit_by_category(
            user_id=f"{self.category}{self.name}",
            limit=RATE_LIMITS["tokens"],
            minutes=1,
        )

        logger.info(
            f"Executing DexScreener get_tokens_info tool with chain_id: '{chain_id}', "
            f"token_addresses: {len(token_addresses)} tokens"
        )

        try:
            # The batch endpoint accepts comma-separated addresses in the path.
            endpoint = f"{API_ENDPOINTS['tokens']}/{chain_id}/{','.join(token_addresses)}"

            data, error_details = await self._get(path=endpoint)

            if error_details:
                return await self._handle_error_response(error_details)

            if not data:
                logger.error(f"No data returned for tokens on {chain_id}")
                return create_error_response(
                    error_type="empty_success",
                    message="API call returned empty success response.",
                    additional_data={
                        "chain_id": chain_id,
                        "token_addresses": token_addresses,
                    },
                )

            try:
                # The batch response mirrors the search endpoint's shape, so the
                # search response model is reused for validation.
                result = SearchTokenResponseModel.model_validate(data)
            except ValidationError as e:
                return handle_validation_error(
                    e, f"{chain_id}/{len(token_addresses)} tokens", len(str(data))
                )

            # Shared by both "nothing usable came back" outcomes below.
            context = f"{chain_id} - {len(token_addresses)} tokens"
            empty_payload = {
                "chain_id": chain_id,
                "requested_addresses": token_addresses,
                "tokens_data": {},
                "all_pairs": [],
                "found_tokens": 0,
                "total_pairs": 0,
            }

            if not result.pairs:
                return create_no_results_response(
                    context,
                    reason="no trading pairs found for any of the specified tokens",
                    additional_data=empty_payload,
                )

            valid_pairs = [pair for pair in result.pairs if pair is not None]

            if not valid_pairs:
                return create_no_results_response(
                    context,
                    reason="all pairs were null or invalid",
                    additional_data=empty_payload,
                )

            # Organize pairs per token address, then rank each token's pairs by
            # liquidity (highest first). A sort failure is logged but non-fatal.
            grouped = group_pairs_by_token(valid_pairs)
            for address, token_pairs in grouped.items():
                try:
                    token_pairs.sort(key=get_liquidity_value, reverse=True)
                except Exception as sort_err:
                    logger.warning(
                        f"Failed to sort pairs for token {address}: {sort_err}"
                    )

            logger.info(
                f"Found {len(valid_pairs)} total pairs across {len(grouped)} tokens "
                f"for {len(token_addresses)} requested addresses on {chain_id}"
            )

            return format_success_response(
                {
                    "tokens_data": {
                        address: [pair.model_dump() for pair in token_pairs]
                        for address, token_pairs in grouped.items()
                    },
                    "all_pairs": [pair.model_dump() for pair in valid_pairs],
                    "chain_id": chain_id,
                    "requested_addresses": token_addresses,
                    "found_tokens": len(grouped),
                    "total_pairs": len(valid_pairs),
                }
            )

        except Exception as e:
            return await self._handle_unexpected_runtime_error(
                e, f"{chain_id}/{len(token_addresses)} tokens"
            )

    async def _handle_error_response(self, error_details: dict) -> str:
        """Formats error details (from _get) into a JSON string."""
        # Transport/parsing failures are logged as errors; API-level errors
        # (e.g. bad request) are only warnings.
        if error_details.get("error_type") in {
            "connection_error",
            "parsing_error",
            "unexpected_error",
        }:
            logger.error(
                f"DexScreener get_tokens_info tool encountered an error: {error_details}"
            )
        else:  # api_error
            logger.warning(f"DexScreener API returned an error: {error_details}")

        # Truncate potentially large fields before returning to user/LLM
        return format_success_response(truncate_large_fields(error_details))

    async def _handle_unexpected_runtime_error(
        self, e: Exception, query_info: str
    ) -> str:
        """Formats unexpected runtime exception details into a JSON string."""
        logger.exception(
            f"An unexpected runtime error occurred in get_tokens_info tool _arun method for {query_info}: {e}"
        )
        return create_error_response(
            error_type="runtime_error",
            message="An unexpected internal error occurred processing the tokens info request",
            details=str(e),
            additional_data={"query_info": query_info},
        )
@@ -34,7 +34,52 @@
34
34
  "Agent Owner + All Users",
35
35
  "Agent Owner Only"
36
36
  ],
37
- "description": "Searches on DexScreener for token pairs matching a query (symbol, name, address). Returns up to 50 pairs sorted by 'liquidity' or 'volume24h' (required input), including price, volume, etc. Use this tool to find token information based on user queries.",
37
+ "description": "Searches on DexScreener for token pairs matching a query (symbol, name, address). Returns up to 25 pairs sorted by 'liquidity' or 'volume' with timeframe options, including price, volume, etc. Use this tool to find token information based on user queries.",
38
+ "default": "disabled"
39
+ },
40
+ "get_pair_info": {
41
+ "type": "string",
42
+ "enum": [
43
+ "disabled",
44
+ "public",
45
+ "private"
46
+ ],
47
+ "x-enum-title": [
48
+ "Disabled",
49
+ "Agent Owner + All Users",
50
+ "Agent Owner Only"
51
+ ],
52
+ "description": "Retrieves detailed information about a specific trading pair using chain ID and pair address. Returns comprehensive data including current price, volume, liquidity, price changes, market cap, FDV, transaction counts, and social links.",
53
+ "default": "disabled"
54
+ },
55
+ "get_token_pairs": {
56
+ "type": "string",
57
+ "enum": [
58
+ "disabled",
59
+ "public",
60
+ "private"
61
+ ],
62
+ "x-enum-title": [
63
+ "Disabled",
64
+ "Agent Owner + All Users",
65
+ "Agent Owner Only"
66
+ ],
67
+ "description": "Finds all trading pairs for a specific token using chain ID and token address. Returns a list of all pools/pairs where this token is traded, including pair addresses, DEX information, liquidity, volume, and pricing data for each pair.",
68
+ "default": "disabled"
69
+ },
70
+ "get_tokens_info": {
71
+ "type": "string",
72
+ "enum": [
73
+ "disabled",
74
+ "public",
75
+ "private"
76
+ ],
77
+ "x-enum-title": [
78
+ "Disabled",
79
+ "Agent Owner + All Users",
80
+ "Agent Owner Only"
81
+ ],
82
+ "description": "Retrieves detailed trading pair information for multiple tokens (up to 30) using chain ID and a list of token addresses. More efficient than making individual calls when you need info for multiple tokens. Use for portfolio analysis or comparing multiple tokens at once.",
38
83
  "default": "disabled"
39
84
  }
40
85
  }
@@ -1,48 +1,32 @@
1
- import json
2
1
  import logging
3
- from enum import Enum
4
- from typing import (
5
- Any,
6
- Callable,
7
- Literal,
8
- Optional,
9
- Type,
10
- )
2
+ from typing import Any, Optional, Type
11
3
 
12
4
  from pydantic import BaseModel, Field, ValidationError
13
5
 
14
6
  from intentkit.skills.dexscreener.base import DexScreenerBaseTool
15
7
  from intentkit.skills.dexscreener.model.search_token_response import (
16
- PairModel,
17
8
  SearchTokenResponseModel,
18
9
  )
10
+ from intentkit.skills.dexscreener.utils import (
11
+ API_ENDPOINTS,
12
+ MAX_SEARCH_RESULTS,
13
+ SEARCH_DISCLAIMER,
14
+ QueryType,
15
+ SortBy,
16
+ VolumeTimeframe,
17
+ create_error_response,
18
+ create_no_results_response,
19
+ determine_query_type,
20
+ filter_address_pairs,
21
+ filter_ticker_pairs,
22
+ format_success_response,
23
+ handle_validation_error,
24
+ sort_pairs_by_criteria,
25
+ truncate_large_fields,
26
+ )
19
27
 
20
28
  logger = logging.getLogger(__name__)
21
29
 
22
- MAX_RESULTS_LIMIT = 25 # limit to 25 pair entries
23
- SEARCH_TOKEN_API_PATH = "/latest/dex/search"
24
-
25
- # Define the allowed sort options, including multiple volume types
26
- SortByOption = Literal["liquidity", "volume"]
27
- VolumeTimeframeOption = Literal["24_hour", "6_hour", "1_hour", "5_minutes"]
28
-
29
-
30
- class QueryType(str, Enum):
31
- TEXT = "TEXT"
32
- TICKER = "TICKER"
33
- ADDRESS = "ADDRESS"
34
-
35
-
36
- # this will bring aloside with pairs information
37
- DISCLAIMER_TEXT = {
38
- "disclaimer": (
39
- "Search results may include unofficial, duplicate, or potentially malicious tokens. "
40
- "If multiple unrelated tokens share a similar name or ticker, ask the user for the exact token address. "
41
- "If the correct token is not found, re-run the tool using the provided address. "
42
- "Also advise the user to verify the token's legitimacy via its official social links included in the result."
43
- )
44
- }
45
-
46
30
 
47
31
  class SearchTokenInput(BaseModel):
48
32
  """Input schema for the DexScreener search_token tool."""
@@ -50,13 +34,13 @@ class SearchTokenInput(BaseModel):
50
34
  query: str = Field(
51
35
  description="The search query string (e.g., token symbol 'WIF', pair address, token address '0x...', token name 'Dogwifhat', or ticker '$WIF'). Prefixing with '$' filters results to match the base token symbol exactly (case-insensitive)."
52
36
  )
53
- sort_by: Optional[SortByOption] = Field(
54
- default="liquidity",
37
+ sort_by: Optional[SortBy] = Field(
38
+ default=SortBy.LIQUIDITY,
55
39
  description="Sort preference for the results. Options: 'liquidity' (default) or 'volume'",
56
40
  )
57
- volume_timeframe: Optional[VolumeTimeframeOption] = Field(
58
- default="24_hour",
59
- description=f"define which timeframe should we use if the 'sort_by' is `volume` avalable options are {VolumeTimeframeOption}",
41
+ volume_timeframe: Optional[VolumeTimeframe] = Field(
42
+ default=VolumeTimeframe.TWENTY_FOUR_HOUR,
43
+ description="Define which timeframe should we use if the 'sort_by' is 'volume'. Available options: '5_minutes', '1_hour', '6_hour', '24_hour'",
60
44
  )
61
45
 
62
46
 
@@ -72,7 +56,7 @@ class SearchToken(DexScreenerBaseTool):
72
56
  f"If the query starts with '$', it filters results to only include pairs where the base token symbol exactly matches the ticker (case-insensitive). "
73
57
  f"Returns a list of matching pairs with details like price, volume, liquidity, etc., "
74
58
  f"sorted by the specified criteria (via 'sort_by': 'liquidity', 'volume'; defaults to 'liquidity'), "
75
- f"limited to the top {MAX_RESULTS_LIMIT}. "
59
+ f"limited to the top {MAX_SEARCH_RESULTS}. "
76
60
  f"Use this tool to find token information based on user queries."
77
61
  )
78
62
  args_schema: Type[BaseModel] = SearchTokenInput
@@ -80,8 +64,8 @@ class SearchToken(DexScreenerBaseTool):
80
64
  async def _arun(
81
65
  self,
82
66
  query: str,
83
- sort_by: Optional[SortByOption] = "liquidity",
84
- volume_timeframe: Optional[VolumeTimeframeOption] = "24_hour",
67
+ sort_by: Optional[SortBy] = SortBy.LIQUIDITY,
68
+ volume_timeframe: Optional[VolumeTimeframe] = VolumeTimeframe.TWENTY_FOUR_HOUR,
85
69
  **kwargs: Any,
86
70
  ) -> str:
87
71
  """Implementation to search token, with filtering based on query type."""
@@ -95,11 +79,11 @@ class SearchToken(DexScreenerBaseTool):
95
79
  minutes=1,
96
80
  )
97
81
 
98
- sort_by = sort_by or "liquidity"
99
- volume_timeframe = volume_timeframe or "24_hour"
82
+ sort_by = sort_by or SortBy.LIQUIDITY
83
+ volume_timeframe = volume_timeframe or VolumeTimeframe.TWENTY_FOUR_HOUR
100
84
 
101
85
  # Determine query type
102
- query_type = self.get_query_type(query)
86
+ query_type = determine_query_type(query)
103
87
 
104
88
  # Process query based on type
105
89
  if query_type == QueryType.TICKER:
@@ -115,60 +99,28 @@ class SearchToken(DexScreenerBaseTool):
115
99
  f"sort_by: {sort_by}"
116
100
  )
117
101
 
118
- ### --- sort functions ---
119
- def get_liquidity_usd(pair: PairModel) -> float:
120
- return (
121
- pair.liquidity.usd
122
- if pair.liquidity and pair.liquidity.usd is not None
123
- else 0.0
124
- )
125
-
126
- def get_volume(pair: PairModel) -> float:
127
- if not pair.volume:
128
- return 0.0
129
- return {
130
- "24_hour": pair.volume.h24,
131
- "6_hour": pair.volume.h6,
132
- "1_hour": pair.volume.h1,
133
- "5_minutes": pair.volume.m5,
134
- }.get(volume_timeframe, 0.0) or 0.0
135
-
136
- def get_sort_key_func() -> Callable[[PairModel], float]:
137
- if sort_by == "liquidity":
138
- return get_liquidity_usd
139
- if sort_by == "volume":
140
- return get_volume
141
- logger.warning(
142
- f"Invalid sort_by value '{sort_by}', defaulting to liquidity."
143
- )
144
- return get_liquidity_usd
145
-
146
- ### --- END sort functions ---
147
-
148
102
  try:
149
103
  data, error_details = await self._get(
150
- path=SEARCH_TOKEN_API_PATH, params={"q": search_query}
104
+ path=API_ENDPOINTS["search"], params={"q": search_query}
151
105
  )
152
106
 
153
107
  if error_details:
154
108
  return await self._handle_error_response(error_details)
155
109
  if not data:
156
110
  logger.error(f"No data or error details returned for query '{query}'")
157
- return json.dumps(
158
- {
159
- "error": "API call returned empty success response.",
160
- "error_type": "empty_success",
161
- },
162
- indent=2,
111
+ return create_error_response(
112
+ error_type="empty_success",
113
+ message="API call returned empty success response.",
114
+ additional_data={"query": query},
163
115
  )
164
116
 
165
117
  try:
166
118
  result = SearchTokenResponseModel.model_validate(data)
167
119
  except ValidationError as e:
168
- return await self._handle_validation_error(e, query, data)
120
+ return handle_validation_error(e, query, len(str(data)))
169
121
 
170
122
  if not result.pairs:
171
- return await self._no_pairs_found_response(
123
+ return create_no_results_response(
172
124
  query, reason="returned null or empty for pairs"
173
125
  )
174
126
 
@@ -176,83 +128,34 @@ class SearchToken(DexScreenerBaseTool):
176
128
 
177
129
  # Apply filtering based on query type
178
130
  if query_type == QueryType.TICKER and target_ticker:
179
- pairs_list = [
180
- p
181
- for p in pairs_list
182
- if p.baseToken
183
- and p.baseToken.symbol
184
- and p.baseToken.symbol.upper() == target_ticker
185
- ]
131
+ pairs_list = filter_ticker_pairs(pairs_list, target_ticker)
186
132
  if not pairs_list:
187
- return await self._no_pairs_found_response(
133
+ return create_no_results_response(
188
134
  query, reason=f"no match for ticker '${target_ticker}'"
189
135
  )
190
136
  elif query_type == QueryType.ADDRESS:
191
- # Filter by address (checking pairAddress, baseToken.address, quoteToken.address)
192
- pairs_list = [
193
- p
194
- for p in pairs_list
195
- if (p.pairAddress and p.pairAddress.lower() == search_query.lower())
196
- or (
197
- p.baseToken
198
- and p.baseToken.address
199
- and p.baseToken.address.lower() == search_query.lower()
200
- )
201
- or (
202
- p.quoteToken
203
- and p.quoteToken.address
204
- and p.quoteToken.address.lower() == search_query.lower()
205
- )
206
- ]
137
+ pairs_list = filter_address_pairs(pairs_list, search_query)
207
138
  if not pairs_list:
208
- return await self._no_pairs_found_response(
139
+ return create_no_results_response(
209
140
  query, reason=f"no match for address '{search_query}'"
210
141
  )
211
142
 
212
- try:
213
- sort_func = get_sort_key_func()
214
- pairs_list.sort(key=sort_func, reverse=True)
215
- except Exception as sort_err:
216
- logger.error(f"Sorting failed: {sort_err}", exc_info=True)
217
- return json.dumps(
218
- {
219
- "error": "Failed to sort results.",
220
- "error_type": "sorting_error",
221
- "details": str(sort_err),
222
- "unsorted_results": [
223
- p.model_dump() for p in pairs_list[:MAX_RESULTS_LIMIT]
224
- ],
225
- **DISCLAIMER_TEXT,
226
- },
227
- indent=2,
228
- )
143
+ # Sort pairs by specified criteria
144
+ pairs_list = sort_pairs_by_criteria(pairs_list, sort_by, volume_timeframe)
229
145
 
230
- final_count = min(len(pairs_list), MAX_RESULTS_LIMIT)
146
+ # If sorting failed, pairs_list will be returned unchanged by the utility function
147
+
148
+ final_count = min(len(pairs_list), MAX_SEARCH_RESULTS)
231
149
  logger.info(f"Returning {final_count} pairs for query '{query}'")
232
- return json.dumps(
150
+ return format_success_response(
233
151
  {
234
- **DISCLAIMER_TEXT,
235
- "pairs": [p.model_dump() for p in pairs_list[:MAX_RESULTS_LIMIT]],
236
- },
237
- indent=2,
152
+ **SEARCH_DISCLAIMER,
153
+ "pairs": [p.model_dump() for p in pairs_list[:MAX_SEARCH_RESULTS]],
154
+ }
238
155
  )
239
156
  except Exception as e:
240
157
  return await self._handle_unexpected_runtime_error(e, query)
241
158
 
242
- def get_query_type(self, query: str) -> QueryType:
243
- """
244
- Determine whether the query is a TEXT, TICKER, or ADDRESS.
245
-
246
- TICKER: starts with '$'
247
- ADDRESS: starts with '0x'.
248
- TEXT: anything else.
249
- """
250
- if query.startswith("0x"):
251
- return QueryType.ADDRESS
252
- if query.startswith("$"):
253
- return QueryType.TICKER
254
- return QueryType.TEXT
255
-
256
159
  async def _handle_error_response(self, error_details: dict) -> str:
257
160
  """Formats error details (from _get) into a JSON string."""
258
161
  if error_details.get("error_type") in [
@@ -265,57 +168,17 @@ class SearchToken(DexScreenerBaseTool):
265
168
  logger.warning(f"DexScreener API returned an error: {error_details}")
266
169
 
267
170
  # Truncate potentially large fields before returning to user/LLM
268
- for key in ["details", "response_body"]:
269
- if (
270
- isinstance(error_details.get(key), str)
271
- and len(error_details[key]) > 500
272
- ):
273
- error_details[key] = error_details[key][:500] + "... (truncated)"
274
-
275
- return json.dumps(error_details, indent=2)
276
-
277
- async def _handle_validation_error(
278
- self, e: ValidationError, query: str, data: Any
279
- ) -> str:
280
- """Formats validation error details into a JSON string."""
281
- logger.error(
282
- f"Failed to validate DexScreener response structure for query '{query}'. Error: {e}. Raw data length: {len(str(data))}",
283
- exc_info=True,
284
- )
285
- # Avoid sending potentially huge raw data back
286
- return json.dumps(
287
- {
288
- "error": "Failed to parse successful DexScreener API response",
289
- "error_type": "validation_error",
290
- "details": e.errors(),
291
- },
292
- indent=2,
293
- )
171
+ truncated_details = truncate_large_fields(error_details)
172
+ return format_success_response(truncated_details)
294
173
 
295
174
  async def _handle_unexpected_runtime_error(self, e: Exception, query: str) -> str:
296
175
  """Formats unexpected runtime exception details into a JSON string."""
297
176
  logger.exception(
298
177
  f"An unexpected runtime error occurred in search_token tool _arun method for query '{query}': {e}"
299
178
  )
300
- return json.dumps(
301
- {
302
- "error": "An unexpected internal error occurred processing the search request",
303
- "error_type": "runtime_error",
304
- "details": str(e),
305
- },
306
- indent=2,
307
- )
308
-
309
- async def _no_pairs_found_response(
310
- self, query: str, reason: str = "returned no matching pairs"
311
- ) -> str:
312
- """Generates the standard 'no pairs found' JSON response."""
313
- logger.info(f"DexScreener search for query '{query}': {reason}.")
314
- return json.dumps(
315
- {
316
- "message": f"No matching pairs found for the query '{query}'. Reason: {reason}.",
317
- "query": query,
318
- "pairs": [],
319
- },
320
- indent=2,
179
+ return create_error_response(
180
+ error_type="runtime_error",
181
+ message="An unexpected internal error occurred processing the search request",
182
+ details=str(e),
183
+ additional_data={"query": query},
321
184
  )