unrealon-1.0.5-py3-none-any.whl → unrealon-1.0.7-py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- unrealon/sdk_config.py +1 -1
- {unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/METADATA +4 -4
- {unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/RECORD +10 -10
- unrealon_driver/src/config/auto_config.py +4 -4
- unrealon_driver/src/execution/daemon_mode.py +10 -10
- unrealon_llm/src/modules/html_processor/base_processor.py +134 -13
- unrealon_llm/src/modules/html_processor/details_processor.py +58 -34
- unrealon_llm/src/modules/html_processor/listing_processor.py +64 -40
- {unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/LICENSE +0 -0
- {unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/WHEEL +0 -0
unrealon/sdk_config.py
CHANGED
{unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: unrealon
-Version: 1.0.5
+Version: 1.0.7
 Summary: AI-powered web scraping platform with real-time orchestration
 License: MIT
 Author: Unrealon Team

@@ -186,7 +186,7 @@ UNREALON_BROWSER_PROFILE_DIR=system/browser_profiles
 # API Keys
 UNREALON_OPENROUTER_API_KEY=sk-or-v1-your-openrouter-key
 UNREALON_SERVER_URL=wss://api.unrealon.com
-
+UNREALON_API_KEY=up_dev_your-api-key

 # Runtime Limits
 UNREALON_LLM_DAILY_LIMIT=1.0

@@ -247,7 +247,7 @@ class ParserSettings(BaseSettings):
     # API Keys
     OPENROUTER_API_KEY: str
     SERVER_URL: str
-
+    API_KEY: str

     # Runtime Limits
     LLM_DAILY_LIMIT: float = Field(default=1.0)

@@ -317,7 +317,7 @@ class CustomAutoConfig(AutoConfig):
         """Override daemon config with custom settings."""
         return DaemonModeConfig(
             server_url=parser_settings.SERVER_URL,
-            api_key=parser_settings.
+            api_key=parser_settings.API_KEY,
             auto_reconnect=True,
             connection_timeout=30,
             heartbeat_interval=30,
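The README embedded in METADATA now documents one generic API key end to end: the UNREALON_API_KEY environment variable, the ParserSettings field API_KEY, and the api_key=parser_settings.API_KEY pass-through. A minimal sketch of how those pieces line up, assuming Pydantic 2 with pydantic-settings and an UNREALON_ env prefix (the prefix handling is an assumption; it is not visible in this diff):

# Sketch only: imports and the env prefix are assumptions; the field names and
# placeholder values come from the README hunks above.
import os

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class ParserSettings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="UNREALON_")  # assumed, not shown in the diff

    # API Keys
    OPENROUTER_API_KEY: str
    SERVER_URL: str
    API_KEY: str  # new in 1.0.7; fed by UNREALON_API_KEY=up_dev_your-api-key

    # Runtime Limits
    LLM_DAILY_LIMIT: float = Field(default=1.0)


os.environ.setdefault("UNREALON_OPENROUTER_API_KEY", "sk-or-v1-your-openrouter-key")
os.environ.setdefault("UNREALON_SERVER_URL", "wss://api.unrealon.com")
os.environ.setdefault("UNREALON_API_KEY", "up_dev_your-api-key")

parser_settings = ParserSettings()
print(parser_settings.API_KEY)  # up_dev_your-api-key

# The README's CustomAutoConfig example then forwards it unchanged:
# DaemonModeConfig(server_url=parser_settings.SERVER_URL,
#                  api_key=parser_settings.API_KEY, auto_reconnect=True, ...)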
{unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 unrealon/__init__.py,sha256=IWhHl4jLgYR5HjEtHR1_-BF3tUkXJpP67IqqekdsVxk,494
-unrealon/sdk_config.py,sha256=
+unrealon/sdk_config.py,sha256=V9QDoWS74obOJv4PotCHbXSWuI78NeIujzM8Lx_j-pc,2118
 unrealon_browser/README.md,sha256=9pP6RrfMGHtdT5uDLFAUB1e4nNGzZudXViEo1940gKw,396
 unrealon_browser/__init__.py,sha256=pLHhYYhYJmzrED1Chi3uQAETVFxqQrikwEAwO2TTZ70,631
 unrealon_browser/pyproject.toml,sha256=_PTGU6Pwh7antWDqZeA6KU-Vx3Xw4jwTlU_Wgt6t0Cg,4945

@@ -32,7 +32,7 @@ unrealon_driver/src/cli/__init__.py,sha256=6AE6FJoXxhr5bMGn9PVuavryEsvcjMiGFbQdn
 unrealon_driver/src/cli/main.py,sha256=wX0TxXyGG_X1La97zrELOujCI5i7mHnFeSP7d-r9FP8,1725
 unrealon_driver/src/cli/simple.py,sha256=rx8t8FSviddFYNdI18DJB6G3FLKndO_X56z69Iy2Z3Q,20087
 unrealon_driver/src/config/__init__.py,sha256=w-oB-HFHxscdxg9Ayw1t9jkRESDX9pTV5ggDrgt_Mrc,204
-unrealon_driver/src/config/auto_config.py,sha256=
+unrealon_driver/src/config/auto_config.py,sha256=e_7WPfFO8tl3JE-HPSzTfNVGJVEGKl6wuXO3z7qLeUk,16999
 unrealon_driver/src/core/__init__.py,sha256=dMsabllFmGA9PojjS9Y5DzrTzJgKA1BxvayAhl8qi3M,320
 unrealon_driver/src/core/exceptions.py,sha256=k5lqYqD100Q-d7Jyy1-dEm6gGnv-AxHFhpCaZywfxvo,8225
 unrealon_driver/src/core/parser.py,sha256=Wg4qSeCRsZSSLti-vCiYksnGXXR8ez0ZFXJc0PDiYnM,20950

@@ -43,7 +43,7 @@ unrealon_driver/src/dto/events.py,sha256=w69HZOKiKvopNpfMEhijCSZJWMY19f44wkPMnWH
 unrealon_driver/src/dto/execution.py,sha256=WMgKKfYiWHLwQ0sict16_QXbIBJGqLzWDma1F7CHYzQ,10583
 unrealon_driver/src/dto/services.py,sha256=BNqk2SQCRRnrIDsZJ1MbIniwZR6l3-kzqqRJgoAf3o8,10315
 unrealon_driver/src/execution/__init__.py,sha256=3lkrR6HTZL4SMXToyklQPXF4MelmRKVdoWWlnvPp72w,580
-unrealon_driver/src/execution/daemon_mode.py,sha256=
+unrealon_driver/src/execution/daemon_mode.py,sha256=tgC4e-qAQj0R1_NCWwYtEE66yc_aFliVHSG1zGHeJJU,11022
 unrealon_driver/src/execution/interactive_mode.py,sha256=UbHpF6bMIQPrA-VBP3QIZ1KE6wAygwkVMzHoplevf6I,3245
 unrealon_driver/src/execution/modes.py,sha256=CuEJdcCkZSDpp_o668x9IiqOFZpvCgJ15j6bI75HvrA,1404
 unrealon_driver/src/execution/scheduled_mode.py,sha256=r-f2cUHNMxiBibTeta4c0g92eRhIZmSSvKrKnVDnRGI,7323

@@ -91,9 +91,9 @@ unrealon_llm/src/managers/cost_manager.py,sha256=Bu4LUWcKB9JSwIz2m5FxAe5iEN3dVRX
 unrealon_llm/src/managers/request_manager.py,sha256=oMsn2x1P6AF_6C84kmJrl_SCTHpWzgBkR50M9-bIyd0,10702
 unrealon_llm/src/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 unrealon_llm/src/modules/html_processor/__init__.py,sha256=-Z3ExABUbUn0l8GEZhbW3WYFJGv7uheGpftiyOji0yg,552
-unrealon_llm/src/modules/html_processor/base_processor.py,sha256=
-unrealon_llm/src/modules/html_processor/details_processor.py,sha256=
-unrealon_llm/src/modules/html_processor/listing_processor.py,sha256=
+unrealon_llm/src/modules/html_processor/base_processor.py,sha256=azggKCsE52cUnRAFi-7bak7LZPrYrrH5tJFWxI-HoBo,16423
+unrealon_llm/src/modules/html_processor/details_processor.py,sha256=eiayCPIB9p4PSDtMUEgdPhtb8yu-LqN-WkFd86BuhE4,3479
+unrealon_llm/src/modules/html_processor/listing_processor.py,sha256=h8D79XnVsWkEqcbDoVhcsbS1LYV2AngVo1ROy5oRD7o,3669
 unrealon_llm/src/modules/html_processor/models/__init__.py,sha256=Hn-ztJWI4SokVxhx-cakm5h_Xhjg0rn9_IXyToIjwRE,420
 unrealon_llm/src/modules/html_processor/models/processing_models.py,sha256=SoOE8KCwivU3FGLK3fxx0rKtKXyI-C7ibEHyWCApZAQ,1460
 unrealon_llm/src/modules/html_processor/models/universal_model.py,sha256=Zi3L_t4rcwHpbJKADWSdK7yBErVIGBf3ZRCsZxUsMDM,1738

@@ -240,8 +240,8 @@ unrealon_sdk/src/internal/http_client.py,sha256=uU3BdNYj4ZL16y0BpBxOtWLOo-pE-8LW
 unrealon_sdk/src/internal/websocket_client.py,sha256=1TteTv_6dUMXS5xTXwld6XB2Q0hcOsycLv9l_KEB8aA,15700
 unrealon_sdk/src/provider.py,sha256=kyKjUjuo6s8hcTld8gIc7aO4SM7ozhsUlIM0EXOyblw,14104
 unrealon_sdk/src/utils.py,sha256=nj8a83a7p_RXA985yRdHQxPr2S4rwKiwp1wD3qj7EEU,5440
-unrealon-1.0.
-unrealon-1.0.
-unrealon-1.0.
-unrealon-1.0.
+unrealon-1.0.7.dist-info/LICENSE,sha256=eEH8mWZW49YMpl4Sh5MtKqkZ8aVTzKQXiNPEnvL14ns,1070
+unrealon-1.0.7.dist-info/METADATA,sha256=0Ge4C8jAuQrfnYPdBpo8BA60yLUhDGfm8b7gIzG87Cc,29061
+unrealon-1.0.7.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+unrealon-1.0.7.dist-info/RECORD,,
unrealon_driver/src/config/auto_config.py
CHANGED

@@ -204,8 +204,8 @@ class AutoConfigBase(BaseModel):
     def _create_websocket_config(self) -> WebSocketConfig:
         """Create WebSocket configuration."""
         return WebSocketConfig(
-            server_url=os.getenv("
-            api_key=os.getenv("
+            server_url=os.getenv("SERVER_URL"),
+            api_key=os.getenv("API_KEY"),
             parser_name=self.parser_id,
             auto_reconnect=True,
             max_reconnect_attempts=10 if self.environment == "production" else 5,

@@ -265,8 +265,8 @@ class AutoConfigBase(BaseModel):
     def _create_daemon_config(self) -> DaemonModeConfig:
         """Create daemon mode configuration."""
         return DaemonModeConfig(
-            server_url=os.getenv("
-            api_key=os.getenv("
+            server_url=os.getenv("SERVER_URL"),
+            api_key=os.getenv("API_KEY"),
             auto_reconnect=True,
             connection_timeout=30,
             heartbeat_interval=30,
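Both builders now read the un-prefixed SERVER_URL and API_KEY variables directly via os.getenv. A small self-contained sketch of what the process environment has to provide for those calls to resolve; the values are the placeholders used in the README, and the real builders additionally pass parser_name, reconnect and timeout settings as shown above:

# Illustration only, not part of the package.
import os

os.environ.setdefault("SERVER_URL", "wss://api.unrealon.com")
os.environ.setdefault("API_KEY", "up_dev_your-api-key")

server_url = os.getenv("SERVER_URL")  # what _create_websocket_config / _create_daemon_config read
api_key = os.getenv("API_KEY")
print(server_url, api_key)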
unrealon_driver/src/execution/daemon_mode.py
CHANGED

@@ -48,12 +48,12 @@ class DaemonMode:
         self.logger.info(f"🔌 Starting daemon mode for: {self.parser.parser_name}")

         # Get connection parameters
-
-
-        api_key or self.config.api_key or kwargs.get("
+        server_url = server or self.config.server_url or kwargs.get("server_url")
+        api_key = (
+            api_key or self.config.api_key or kwargs.get("api_key")
         )

-        if not
+        if not server_url:
             if self.logger:
                 self.logger.error(
                     "❌ WebSocket server URL not configured for daemon mode!"

@@ -61,7 +61,7 @@ class DaemonMode:
                 self.logger.info(" Set server URL in config or pass as parameter")
             return

-        if not
+        if not api_key:
             if self.logger:
                 self.logger.error(
                     "❌ WebSocket API key not configured for daemon mode!"

@@ -71,16 +71,16 @@ class DaemonMode:

         if self.logger:
             self.logger.info(f"🔌 Starting WebSocket daemon mode")
-            self.logger.info(f" Server: {
+            self.logger.info(f" Server: {server_url}")
             self.logger.info(f" Parser: {self.parser.parser_id}")
             self.logger.info(
-                f" API Key: {'***' +
+                f" API Key: {'***' + api_key[-4:] if len(api_key) > 4 else '***'}"
             )

         # Setup WebSocket service configuration with type safety
         websocket_config = WebSocketConfig(
-            server_url=
-            api_key=
+            server_url=server_url,
+            api_key=api_key,
             parser_name=self.parser.parser_name,
             auto_reconnect=self.config.auto_reconnect,
             health_check_interval=self.config.health_check_interval,

@@ -110,7 +110,7 @@ class DaemonMode:

         # Connect to WebSocket server
         success = await self.parser.websocket.connect(
-            server_url=
+            server_url=server_url, api_key=api_key
         )

         if not success:
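The start-up path now resolves server_url and api_key from three places in order: the explicit argument, then the daemon config, then kwargs, and masks the key before logging it. A standalone sketch of that precedence and masking, in which the config object is a stand-in rather than the real DaemonModeConfig:

# Stand-in config; only the two attributes touched by the resolution logic.
class _StubConfig:
    server_url = "wss://api.unrealon.com"
    api_key = None


def resolve_connection(server=None, api_key=None, config=_StubConfig, **kwargs):
    # Same precedence as the diff: argument, then config, then kwargs.
    server_url = server or config.server_url or kwargs.get("server_url")
    api_key = api_key or config.api_key or kwargs.get("api_key")
    return server_url, api_key


server_url, api_key = resolve_connection(api_key="up_dev_xyz1234")
print(server_url)                                           # wss://api.unrealon.com
print('***' + api_key[-4:] if len(api_key) > 4 else '***')  # ***1234 (masked, as logged)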
unrealon_llm/src/modules/html_processor/base_processor.py
CHANGED

@@ -10,6 +10,7 @@ import json
 import random
 from typing import Type
 import traceback
+import re

 from unrealon_llm.src.core import SmartLLMClient
 from unrealon_llm.src.dto import ChatMessage, MessageRole

@@ -77,6 +78,10 @@ class BaseHTMLProcessor(ABC):
         """Return extraction prompt template for this processor type"""
         pass

+    def _trim_system_prompt(self, system_prompt: str) -> str:
+        """Trim system prompt to remove empty lines"""
+        return "\n".join(system_prompt.split("\n")[1:])
+
     async def extract_patterns(self, html_content: str) -> ExtractionResult:
         """
         Extract patterns from HTML using LLM intelligence

@@ -116,15 +121,27 @@ class BaseHTMLProcessor(ABC):
             prompt_tokens=0,
             details={
                 "full_prompt": prompt[:2000] + "..." if len(prompt) > 2000 else prompt,
-                "schema_json": json.dumps(
-
+                "schema_json": json.dumps(
+                    self.schema_class.model_json_schema(), indent=2
+                ),
+            },
         )

+        # Add critical format requirements to the prompt
+        SYSTEM_PROMPT = f"""
+You are an HTML-to-JSON expert at analyzing {self.processor_type} pages.
+You MUST return JSON that EXACTLY matches the Pydantic schema provided.
+RESPOND ONLY WITH VALID JSON.
+NO EXPLANATIONS, NO TEXT, ONLY JSON!
+Include ALL required fields from the schema!
+CRITICAL: The 'selectors' field must be a DICTIONARY/OBJECT, not a list!
+"""
+
         # Prepare LLM messages
         messages = [
             ChatMessage(
                 role=MessageRole.SYSTEM,
-                content=
+                content=self._trim_system_prompt(SYSTEM_PROMPT),
             ),
             ChatMessage(
                 role=MessageRole.USER,

@@ -144,8 +161,7 @@ class BaseHTMLProcessor(ABC):
         try:
             # Call LLM
             response = await self.llm_client.chat_completion(
-                messages,
-                response_model=self.schema_class
+                messages, response_model=self.schema_class
             )

             # Log full LLM response for debugging

@@ -167,7 +183,7 @@ class BaseHTMLProcessor(ABC):
             )

             # Use the validated model from LLM response
-            if hasattr(response,
+            if hasattr(response, "extracted_model") and response.extracted_model:
                 validated_model = response.extracted_model
                 validated_result = validated_model.model_dump()
                 logger.log_html_analysis_completed(

@@ -203,8 +219,36 @@ class BaseHTMLProcessor(ABC):
                         "raw_llm_response": result_data,
                     },
                 )
-
-
+
+                # 🔥 SMART FALLBACK: Try to fix common LLM format issues
+                try:
+                    fixed_data = self._fix_llm_response_format(result_data, str(e))
+                    validated_model = self.schema_class(**fixed_data)
+                    validated_result = validated_model.model_dump()
+                    logger.log_html_analysis_completed(
+                        selectors_generated=len(str(fixed_data)),
+                        confidence_score=fixed_data.get("confidence", 0.0),
+                        details={
+                            "processor_type": self.processor_type,
+                            "validation_success": True,
+                            "schema_matched": True,
+                            "format_fixed": True,
+                        },
+                    )
+                except Exception as fix_error:
+                    logger.log_html_analysis_failed(
+                        error_message=f"Format fixing also failed: {str(fix_error)}",
+                        details={
+                            "processor_type": self.processor_type,
+                            "validation_error": str(e),
+                            "fix_error": str(fix_error),
+                            "raw_llm_response": result_data,
+                        },
+                    )
+                    # Final fallback: create minimal valid structure
+                    validated_result = self._create_fallback_result(
+                        result_data, str(e)
+                    )

         # Create Pydantic processing metadata
         processing_info = ProcessingInfo(

@@ -253,12 +297,20 @@

         # Add random number to bypass any caching
         cache_buster = random.randint(100000, 999999)
-
-        schema_prompt = f"""PYDANTIC 2 SCHEMA (Request #{cache_buster}):
-{schema_json}

-
-
+        schema_prompt = f"""
+PYDANTIC 2 SCHEMA (Request #{cache_buster}):
+{schema_json}
+
+🚨 CRITICAL FORMAT REQUIREMENTS:
+1. Return JSON that EXACTLY matches this schema structure!
+2. The response must include ALL required fields: detected_item_type, extraction_strategy, confidence, selectors, documentation
+3. The "selectors" field MUST be a DICTIONARY/OBJECT with field names as keys and arrays of CSS selectors as values
+4. Example: "selectors": {{"title": ["h1.title", ".product-name"], "price": [".price", ".cost"]}}
+5. DO NOT return "selectors" as a list: ❌ ["h1.title", ".price"]
+6. DO return "selectors" as a dictionary: ✅ {{"title": ["h1.title"], "price": [".price"]}}
+"""
+        schema_prompt = self._trim_system_prompt(schema_prompt)

         return prompt_template.format(
             processor_type=self.processor_type,

@@ -292,3 +344,72 @@ The response must include ALL required fields: detected_item_type, extraction_st
         estimated_cost = (total_tokens / 1_000_000) * 0.25

         return estimated_cost
+
+    def _fix_llm_response_format(self, result_data: dict, error_message: str) -> dict:
+        """Fix common LLM response format issues."""
+        fixed_data = result_data.copy()
+
+        # Fix selectors if it's a list instead of dict
+        if "selectors" in fixed_data and isinstance(fixed_data["selectors"], list):
+            logger.log_html_analysis_failed(
+                error_message="Fixing selectors format: list -> dict",
+                details={
+                    "processor_type": self.processor_type,
+                    "original_selectors": fixed_data["selectors"],
+                },
+            )
+
+            # Convert list to dict with generic field names
+            selectors_list = fixed_data["selectors"]
+            fixed_data["selectors"] = {}
+
+            # Try to intelligently map list items to field names
+            field_names = ["item", "title", "price", "description", "image", "link"]
+            for i, selector in enumerate(selectors_list):
+                if i < len(field_names):
+                    field_name = field_names[i]
+                else:
+                    field_name = f"field_{i+1}"
+
+                # Convert single selector to list
+                if isinstance(selector, str):
+                    fixed_data["selectors"][field_name] = [selector]
+                elif isinstance(selector, list):
+                    fixed_data["selectors"][field_name] = selector
+                else:
+                    fixed_data["selectors"][field_name] = [str(selector)]
+
+        # Ensure all required fields exist
+        required_fields = [
+            "detected_item_type",
+            "extraction_strategy",
+            "confidence",
+            "selectors",
+            "documentation",
+        ]
+        for field in required_fields:
+            if field not in fixed_data:
+                if field == "detected_item_type":
+                    fixed_data[field] = "unknown"
+                elif field == "extraction_strategy":
+                    fixed_data[field] = "fallback_strategy"
+                elif field == "confidence":
+                    fixed_data[field] = 0.1
+                elif field == "selectors":
+                    fixed_data[field] = {}
+                elif field == "documentation":
+                    fixed_data[field] = (
+                        "Extraction completed with fallback processing due to format issues."
+                    )
+
+        return fixed_data
+
+    def _create_fallback_result(self, result_data: dict, error_message: str) -> dict:
+        """Create a minimal valid result when all else fails."""
+        return {
+            "detected_item_type": "unknown",
+            "extraction_strategy": "fallback_strategy",
+            "confidence": 0.1,
+            "selectors": {},
+            "documentation": f"Extraction failed due to validation error: {error_message}. Raw data: {str(result_data)[:500]}...",
+        }
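Both new helpers are plain string/dict transforms, so their effect can be shown without the LLM client. _trim_system_prompt drops the first line of a triple-quoted prompt (the blank line that follows the opening quotes): "\n".join("\nline one\nline two".split("\n")[1:]) evaluates to "line one\nline two". The repair in _fix_llm_response_format maps a bare selector list onto generic field names; a reduced, standalone restatement of that step, with logging omitted and all items assumed to be strings:

# Same positional mapping as the helper above; a bare list carries no field
# names, so the mapping is only a best guess.
selectors_list = ["h1.title", ".price", ".description"]
field_names = ["item", "title", "price", "description", "image", "link"]

selectors: dict[str, list[str]] = {}
for i, selector in enumerate(selectors_list):
    field_name = field_names[i] if i < len(field_names) else f"field_{i+1}"
    selectors[field_name] = [selector] if isinstance(selector, str) else [str(selector)]

print(selectors)
# {'item': ['h1.title'], 'title': ['.price'], 'price': ['.description']}

As the output shows, the mapping is purely positional, so a list-shaped response loses the original field association; the dictionary format demanded by the prompts avoids that.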
unrealon_llm/src/modules/html_processor/details_processor.py
CHANGED

@@ -25,37 +25,61 @@ class DetailsProcessor(BaseHTMLProcessor):
     def get_extraction_prompt_template(self) -> str:
         """Return details-specific extraction prompt template"""

-… (previous 34-line prompt template removed; its text was not captured in this view)
+        prompt = """{schema}
+[__TASK_DESCRIPTION__]
+Analyze this DETAILS/PRODUCT/ITEM page and generate universal extraction patterns.
+PROCESSOR TYPE: {processor_type}
+THIS IS A DETAILS PAGE containing information about a single item/product/service/article.
+[/__TASK_DESCRIPTION__]
+
+[__CRITICAL_FORMAT_REQUIREMENTS__]
+🚨 SELECTORS FORMAT: The "selectors" field MUST be a DICTIONARY/OBJECT, NOT a list!
+Example of CORRECT format:
+"selectors": {{
+"title": ["h1.product-title", "h1.page-title", ".item-name"],
+"price": [".price", ".cost", "span[data-price]", ".product-price"],
+"description": [".description", ".product-desc", ".item-details"],
+"images": ["img.product-image", ".gallery img", "img[src*='product']"],
+"specifications": [".specs", ".product-specs", ".item-specifications"],
+"reviews": [".reviews", ".product-reviews", ".customer-reviews"]
+}}
+
+❌ WRONG format (DO NOT USE):
+"selectors": ["h1.title", ".price", ".description"]
+
+✅ CORRECT format (USE THIS):
+"selectors": {{
+"title": ["h1.title", ".product-name", "h1[itemprop='name']"],
+"price": [".price", ".cost", "span[data-price]"],
+"description": [".description", ".product-desc", ".item-details"]
+}}
+[/__CRITICAL_FORMAT_REQUIREMENTS__]
+
+[__INSTRUCTIONS__]
+YOUR TASK:
+Analyze this details page and generate extraction patterns for ANY type of item.
+This could be: product details, service info, article content, job description, real estate listing, person profile, etc.
+
+CRITICAL REQUIREMENTS:
+1. The "selectors" field MUST be a DICTIONARY with field names as keys and arrays of CSS selectors as values
+2. Include comprehensive markdown documentation
+3. Provide real examples from the actual HTML
+4. Explain the page structure and best extraction approach
+5. Include confidence scores and fallback strategies
+6. Document any special handling needed
+
+ANALYZE THE HTML AND DETERMINE:
+- What type of item this page describes
+- What information is available (specs, pricing, reviews, etc.)
+- How content is structured and organized
+- What actions are possible (buy, contact, etc.)
+- Best extraction strategy for this specific page
+[/__INSTRUCTIONS__]
+
+[__HTML_CONTENT__]
+HTML CONTENT (first 50KB):
+{html_content}
+[/__HTML_CONTENT__]
+"""
+
+        return self._trim_system_prompt(prompt)
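The template keeps {schema}, {processor_type} and {html_content} as str.format placeholders, and the doubled {{ }} braces are what keep the literal JSON examples intact after formatting; the base class fills the template via prompt_template.format(processor_type=..., ...). A standalone sketch of that substitution with a cut-down template; the keyword names schema and html_content are inferred from the placeholders and not confirmed by this diff:

# Cut-down stand-in for the real template above; same placeholder mechanics.
template = (
    "{schema}\n"
    "PROCESSOR TYPE: {processor_type}\n"
    'Example: "selectors": {{"title": ["h1.title"]}}\n'
    "HTML CONTENT (first 50KB):\n"
    "{html_content}"
)
prompt = template.format(
    schema='{"title": "UniversalPattern", "type": "object"}',
    processor_type="details",
    html_content="<html><h1 class='product-title'>Example item</h1></html>",
)
print(prompt.splitlines()[2])  # Example: "selectors": {"title": ["h1.title"]}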
unrealon_llm/src/modules/html_processor/listing_processor.py
CHANGED

@@ -25,43 +25,67 @@ class ListingProcessor(BaseHTMLProcessor):
     def get_extraction_prompt_template(self) -> str:
         """Return listing-specific extraction prompt template"""

-
-
-[__TASK_DESCRIPTION__]
-Analyze this LISTING/CATALOG page and generate universal extraction patterns.
-PROCESSOR TYPE: {processor_type}
-THIS IS A LISTING PAGE containing multiple items arranged in a list or grid.
-[/__TASK_DESCRIPTION__]
-
-[
-… (remaining 31 removed lines of the previous template not captured in this view)
+        prompt = """{schema}
+
+[__TASK_DESCRIPTION__]
+Analyze this LISTING/CATALOG page and generate universal extraction patterns.
+PROCESSOR TYPE: {processor_type}
+THIS IS A LISTING PAGE containing multiple items arranged in a list or grid.
+[/__TASK_DESCRIPTION__]
+
+[__CRITICAL_FORMAT_REQUIREMENTS__]
+🚨 SELECTORS FORMAT: The "selectors" field MUST be a DICTIONARY/OBJECT, NOT a list!
+Example of CORRECT format:
+"selectors": {{
+"items_container": ["div.product-grid", "ul.product-list", "div.items"],
+"item_title": ["h3.product-title", "a.product-link", ".item-name"],
+"item_price": [".price", ".cost", "span[data-price]"],
+"item_image": ["img.product-image", ".item-img", "img[src*='product']"],
+"pagination": [".pagination", ".page-nav", "nav[aria-label='pagination']"]
+}}
+
+❌ WRONG format (DO NOT USE):
+"selectors": ["div.product", "h3.title", ".price"]
+
+✅ CORRECT format (USE THIS):
+"selectors": {{
+"items": ["div.product", "li.item", ".product-card"],
+"titles": ["h3.title", ".product-name", "a[title]"],
+"prices": [".price", ".cost", "span[data-price]"]
+}}
+[/__CRITICAL_FORMAT_REQUIREMENTS__]
+
+[__INSTRUCTIONS__]
+YOUR TASK:
+Analyze this listing page and generate extraction patterns for ANY type of items.
+This could be: products, services, articles, jobs, real estate, people, cars, etc.
+
+CRITICAL REQUIREMENTS:
+1. The "selectors" field MUST be a DICTIONARY with field names as keys and arrays of CSS selectors as values
+2. This is a LISTING PAGE with multiple items
+3. Focus on identifying item containers and individual item patterns
+4. Detect ANY type of items - not just products!
+5. Provide multiple fallback selectors for reliability
+6. Include pagination and navigation patterns
+7. Use realistic confidence scores (0.1-1.0)
+8. Auto-detect what type of content this listing contains
+9. Provide extraction strategy advice
+10. Look for structured data (JSON-LD, microdata)
+11. Generate patterns that work with BeautifulSoup4 .select() method
+12. RETURN JSON that EXACTLY matches the Pydantic schema above!
+
+ANALYZE THE HTML AND DETERMINE:
+- What type of items are listed (products, services, articles, etc.)
+- How items are structured and contained
+- What navigation elements exist
+- What metadata is available
+- Best extraction strategy for this specific page
+[/__INSTRUCTIONS__]
+
+[__HTML_CONTENT__]
+HTML CONTENT (first 50KB):
+{html_content}
+[/__HTML_CONTENT__]
+"""
+
+        return self._trim_system_prompt(prompt)
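Requirement 11 in the listing prompt asks for selectors that work with BeautifulSoup4's .select(). A short illustration of consuming the required dictionary-of-selector-lists shape, trying each fallback selector in order; the HTML snippet and field names are illustrative and not part of the package:

# Illustrative consumer of the selectors format required by the prompts above.
from bs4 import BeautifulSoup

html = (
    "<ul class='product-list'>"
    "<li class='item'><h3 class='title'>Widget</h3><span class='price'>9.99</span></li>"
    "</ul>"
)
selectors = {
    "items": ["div.product", "li.item", ".product-card"],
    "titles": ["h3.title", ".product-name", "a[title]"],
    "prices": [".price", ".cost", "span[data-price]"],
}

soup = BeautifulSoup(html, "html.parser")
extracted = {}
for field, candidates in selectors.items():
    for css in candidates:  # try fallback selectors in order
        matches = soup.select(css)
        if matches:
            extracted[field] = [m.get_text(strip=True) for m in matches]
            break

print(extracted)
# {'items': ['Widget9.99'], 'titles': ['Widget'], 'prices': ['9.99']}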
{unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/LICENSE
File without changes

{unrealon-1.0.5.dist-info → unrealon-1.0.7.dist-info}/WHEEL
File without changes