rust-crate-pipeline 1.4.0-py3-none-any.whl → 1.4.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rust_crate_pipeline/__init__.py +18 -27
- rust_crate_pipeline/__main__.py +1 -0
- rust_crate_pipeline/ai_processing.py +718 -596
- rust_crate_pipeline/analysis.py +330 -363
- rust_crate_pipeline/azure_ai_processing.py +462 -0
- rust_crate_pipeline/config.py +46 -28
- rust_crate_pipeline/core/__init__.py +19 -0
- rust_crate_pipeline/core/canon_registry.py +133 -0
- rust_crate_pipeline/core/irl_engine.py +256 -0
- rust_crate_pipeline/core/sacred_chain.py +117 -0
- rust_crate_pipeline/crate_analysis.py +54 -0
- rust_crate_pipeline/crate_list.txt +424 -0
- rust_crate_pipeline/github_token_checker.py +108 -112
- rust_crate_pipeline/main.py +329 -109
- rust_crate_pipeline/network.py +317 -308
- rust_crate_pipeline/pipeline.py +300 -375
- rust_crate_pipeline/production_config.py +24 -27
- rust_crate_pipeline/progress_monitor.py +334 -0
- rust_crate_pipeline/scraping/__init__.py +13 -0
- rust_crate_pipeline/scraping/unified_scraper.py +259 -0
- rust_crate_pipeline/unified_llm_processor.py +637 -0
- rust_crate_pipeline/unified_pipeline.py +548 -0
- rust_crate_pipeline/utils/file_utils.py +32 -5
- rust_crate_pipeline/utils/logging_utils.py +21 -16
- rust_crate_pipeline/version.py +76 -47
- rust_crate_pipeline-1.4.1.dist-info/METADATA +515 -0
- rust_crate_pipeline-1.4.1.dist-info/RECORD +31 -0
- rust_crate_pipeline-1.4.0.dist-info/METADATA +0 -585
- rust_crate_pipeline-1.4.0.dist-info/RECORD +0 -19
- {rust_crate_pipeline-1.4.0.dist-info → rust_crate_pipeline-1.4.1.dist-info}/WHEEL +0 -0
- {rust_crate_pipeline-1.4.0.dist-info → rust_crate_pipeline-1.4.1.dist-info}/entry_points.txt +0 -0
- {rust_crate_pipeline-1.4.0.dist-info → rust_crate_pipeline-1.4.1.dist-info}/licenses/LICENSE +0 -0
- {rust_crate_pipeline-1.4.0.dist-info → rust_crate_pipeline-1.4.1.dist-info}/top_level.txt +0 -0
rust_crate_pipeline/unified_llm_processor.py (new file)
@@ -0,0 +1,637 @@
# unified_llm_processor.py
import re
import time
import logging
import json
from typing import TypedDict, Union, Optional, Dict, Any, List, TYPE_CHECKING
from collections.abc import Callable
from dataclasses import dataclass

if TYPE_CHECKING:
    from typing import Tuple

try:
    import litellm
    from litellm import completion
    LITELLM_AVAILABLE = True
except ImportError:
    LITELLM_AVAILABLE = False
    logging.warning("LiteLLM not available. Install with: pip install litellm")

from .config import PipelineConfig, CrateMetadata, EnrichedCrate


@dataclass
class LLMConfig:
    """Configuration for LLM providers"""
    provider: str  # "azure", "ollama", "lmstudio", "openai", "anthropic", etc.
    model: str  # Model name/identifier
    api_base: Optional[str] = None  # Base URL for API
    api_key: Optional[str] = None  # API key if required
    temperature: float = 0.2
    max_tokens: int = 256
    timeout: int = 30
    max_retries: int = 3

    # Provider-specific settings
    azure_deployment: Optional[str] = None
    azure_api_version: Optional[str] = None

    # Ollama specific
    ollama_host: Optional[str] = None

    # LM Studio specific
    lmstudio_host: Optional[str] = None


class Section(TypedDict, total=True):
    heading: str
    content: str
    priority: int

class UnifiedLLMProcessor:
    """
    Unified LLM processor supporting all LiteLLM providers:
    - Azure OpenAI
    - Ollama (local models)
    - LM Studio (local models)
    - OpenAI
    - Anthropic
    - Google AI
    - And all other LiteLLM providers
    """

    def __init__(self, config: LLMConfig) -> None:
        self.config = config
        self.logger = logging.getLogger(__name__)

        if not LITELLM_AVAILABLE:
            raise ImportError("LiteLLM is required. Install with: pip install litellm")

        # Configure LiteLLM based on provider
        self._configure_litellm()

    def _configure_litellm(self) -> None:
        """Configure LiteLLM based on the provider"""
        if self.config.provider == "azure":
            # Azure OpenAI configuration
            if self.config.api_base and self.config.api_key:
                # Azure config is handled in the completion call
                pass

        elif self.config.provider == "ollama":
            # Ollama configuration
            if self.config.ollama_host:
                litellm.api_base = self.config.ollama_host
            else:
                litellm.api_base = "http://localhost:11434"

        elif self.config.provider == "lmstudio":
            # LM Studio configuration
            if self.config.lmstudio_host:
                litellm.api_base = self.config.lmstudio_host
            else:
                litellm.api_base = "http://localhost:1234/v1"

        elif self.config.provider in ["openai", "anthropic", "google"]:
            # These use standard API keys
            if self.config.api_key:
                # API key is set in the completion call
                pass

    def _get_model_name(self) -> str:
        """Get the appropriate model name for the provider"""
        if self.config.provider == "azure":
            return f"azure/{self.config.model}"
        elif self.config.provider == "ollama":
            return self.config.model
        elif self.config.provider == "lmstudio":
            return self.config.model
        else:
            return self.config.model

    def _get_api_base(self) -> Optional[str]:
        """Get the API base URL for the provider"""
        if self.config.provider == "azure":
            return self.config.api_base
        elif self.config.provider == "ollama":
            return self.config.ollama_host or "http://localhost:11434"
        elif self.config.provider == "lmstudio":
            return self.config.lmstudio_host or "http://localhost:1234/v1"
        else:
            return self.config.api_base

    def estimate_tokens(self, text: str) -> int:
        """Rough token estimation (4 characters per token)"""
        return len(text) // 4

    def truncate_content(self, content: str, max_tokens: int = 1000) -> str:
        """Truncate content to fit within token limit"""
        paragraphs = content.split("\n\n")
        result, current_tokens = "", 0

        for para in paragraphs:
            tokens = self.estimate_tokens(para)
            if current_tokens + tokens <= max_tokens:
                result += para + "\n\n"
                current_tokens += tokens
            else:
                break
        return result.strip()

    def smart_truncate(self, content: str, max_tokens: int = 1000) -> str:
        """Intelligently truncate content to preserve the most important parts"""
        if not content:
            return ""

        # If content is short enough, return it all
        if self.estimate_tokens(content) <= max_tokens:
            return content

        # Split into sections based on markdown headers
        sections: List[Section] = []
        current_section: Section = {
            "heading": "Introduction",
            "content": "",
            "priority": 10,
        }

        for line in content.splitlines():
            if re.match(r"^#+\s+", line):  # It's a header
                # Save previous section if not empty
                if current_section["content"].strip():
                    sections.append(current_section)

                # Create new section with appropriate priority
                heading = re.sub(r"^#+\s+", "", line)
                priority = 5  # Default priority

                # Assign priority based on content type
                if re.search(r"\b(usage|example|getting started)\b", heading, re.I):
                    priority = 10
                elif re.search(r"\b(feature|overview|about)\b", heading, re.I):
                    priority = 9
                elif re.search(r"\b(install|setup|config)\b", heading, re.I):
                    priority = 8
                elif re.search(r"\b(api|interface)\b", heading, re.I):
                    priority = 7

                current_section = {
                    "heading": heading,
                    "content": line + "\n",
                    "priority": priority,
                }
            else:
                current_section["content"] += line + "\n"

                # Boost priority if code block is found
                if "```rust" in line or "```no_run" in line:
                    current_section["priority"] = max(current_section["priority"], 8)

        # Add the last section
        if current_section["content"].strip():
            sections.append(current_section)

        # Sort sections by priority (highest first)
        sections.sort(key=lambda x: x["priority"], reverse=True)

        # Build the result, respecting token limits
        result = ""
        tokens_used = 0

        for section in sections:
            section_text = f'## {section["heading"]}\n{section["content"]}\n'
            section_tokens = self.estimate_tokens(section_text)

            if tokens_used + section_tokens <= max_tokens:
                result += section_text
                tokens_used += section_tokens
            elif tokens_used < max_tokens - 100:  # If we can fit a truncated version
                # Take what we can
                remaining_tokens = max_tokens - tokens_used
                # Simple truncation by characters
                max_chars = remaining_tokens * 4
                if len(section_text) > max_chars:
                    result += section_text[:max_chars] + "..."
                else:
                    result += section_text
                break

        return result

    def clean_output(self, output: str, task: str = "general") -> str:
        """Task-specific output cleaning"""
        if not output:
            return ""

        # Remove any remaining prompt artifacts
        output = output.split("<|end|>")[0].strip()

        if task == "classification":
            # For classification tasks, extract just the category
            categories = [
                "AI",
                "Database",
                "Web Framework",
                "Networking",
                "Serialization",
                "Utilities",
                "DevTools",
                "ML",
                "Cryptography",
                "Unknown",
            ]
            for category in categories:
                if re.search(
                    r"\b" + re.escape(category) + r"\b", output, re.IGNORECASE
                ):
                    return category
            return "Unknown"

        elif task == "factual_pairs":
            # For factual pairs, ensure proper formatting
            pairs: List[str] = []
            facts = re.findall(r"✅\s*Factual:?\s*(.*?)(?=❌|\Z)", output, re.DOTALL)
            counterfacts = re.findall(
                r"❌\s*Counterfactual:?\s*(.*?)(?=✅|\Z)", output, re.DOTALL
            )

            # Pair them up
            for i in range(min(len(facts), len(counterfacts))):
                pairs.append(
                    f"✅ Factual: {facts[i].strip()}\n"
                    f"❌ Counterfactual: {counterfacts[i].strip()}"
                )

            return "\n\n".join(pairs)

        return output

    def call_llm(
        self,
        prompt: str,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        system_message: str = "You are a helpful AI assistant that analyzes Rust crates and provides insights."
    ) -> Optional[str]:
        """Call LLM using LiteLLM with provider-specific configuration"""
        try:
            # Use config defaults if not provided
            temp = temperature if temperature is not None else self.config.temperature
            tokens = max_tokens if max_tokens is not None else self.config.max_tokens

            # Prepare the completion call parameters
            completion_params: Dict[str, Any] = {
                "model": self._get_model_name(),
                "messages": [
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": prompt}
                ],
                "temperature": temp,
                "max_tokens": tokens,
                "timeout": self.config.timeout
            }

            # Add provider-specific parameters
            if self.config.provider == "azure":
                if self.config.api_base:
                    completion_params["api_base"] = self.config.api_base
                if self.config.api_key:
                    completion_params["api_key"] = self.config.api_key
                if self.config.azure_deployment:
                    completion_params["deployment_id"] = self.config.azure_deployment
                if self.config.azure_api_version:
                    completion_params["api_version"] = self.config.azure_api_version

            elif self.config.provider in ["ollama", "lmstudio"]:
                # Local providers don't need API keys
                pass

            else:
                # Other providers (OpenAI, Anthropic, etc.)
                if self.config.api_key:
                    completion_params["api_key"] = self.config.api_key
                if self.config.api_base:
                    completion_params["api_base"] = self.config.api_base

            self.logger.debug(f"Calling LLM with provider: {self.config.provider}, model: {self.config.model}")

            response = completion(**completion_params)

            # Handle different response formats from LiteLLM
            # LiteLLM has complex response objects that vary by provider
            try:
                if hasattr(response, 'choices') and response.choices:  # type: ignore[attr-defined]
                    choice = response.choices[0]  # type: ignore[attr-defined]
                    if hasattr(choice, 'message') and hasattr(choice.message, 'content'):  # type: ignore[attr-defined]
                        return choice.message.content  # type: ignore[attr-defined]
                    elif hasattr(choice, 'content'):  # type: ignore[attr-defined]
                        return choice.content  # type: ignore[attr-defined]
                elif hasattr(response, 'content'):  # type: ignore[attr-defined]
                    return response.content  # type: ignore[attr-defined]
                elif isinstance(response, str):
                    return response
                else:
                    self.logger.error(f"Unexpected response format: {response}")
                    return None
            except Exception as e:
                self.logger.error(f"Error parsing LLM response: {e}")
                return None

        except Exception as e:
            self.logger.error(f"Error calling LLM ({self.config.provider}): {str(e)}")
            return None

    def validate_and_retry(
        self,
        prompt: str,
        validation_func: Callable[[str], bool],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        retries: Optional[int] = None,
        system_message: str = "You are a helpful AI assistant that analyzes Rust crates and provides insights."
    ) -> Optional[str]:
        """Call LLM with validation and retry logic"""
        max_retries = retries if retries is not None else self.config.max_retries

        for attempt in range(max_retries + 1):
            try:
                result = self.call_llm(prompt, temperature, max_tokens, system_message)
                if result and validation_func(result):
                    return result

                if attempt < max_retries:
                    self.logger.warning(f"Validation failed, retrying... (attempt {attempt + 1}/{max_retries})")
                    time.sleep(1 * (attempt + 1))  # Exponential backoff

            except Exception as e:
                self.logger.error(f"Error in attempt {attempt + 1}: {str(e)}")
                if attempt < max_retries:
                    time.sleep(1 * (attempt + 1))

        self.logger.error(f"Failed after {max_retries + 1} attempts")
        return None

    def simplify_prompt(self, prompt: str) -> str:
        """Simplify prompt for better LLM understanding"""
        # Remove excessive whitespace and normalize
        prompt = re.sub(r'\n\s*\n', '\n\n', prompt)
        prompt = re.sub(r' +', ' ', prompt)
        return prompt.strip()

    def validate_classification(self, result: str) -> bool:
        """Validate classification output"""
        categories = ["AI", "Database", "Web Framework", "Networking", "Serialization",
                      "Utilities", "DevTools", "ML", "Cryptography", "Unknown"]
        return any(cat.lower() in result.lower() for cat in categories)

    def validate_factual_pairs(self, result: str) -> bool:
        """Validate factual pairs output"""
        return "✅" in result and "❌" in result

    def enrich_crate(self, crate: CrateMetadata) -> EnrichedCrate:
        """Enrich a crate with LLM analysis"""
        self.logger.info(f"Enriching crate: {crate.name}")

        # Create enriched crate with base metadata
        enriched = EnrichedCrate(**crate.__dict__)

        # Summarize README
        if crate.readme:
            readme_summary = self.summarize_features(crate)
            enriched.readme_summary = readme_summary

            # Classify use case
            use_case = self.classify_use_case(crate, readme_summary)
            enriched.use_case = use_case

            # Generate factual pairs
            factual_pairs = self.generate_factual_pairs(crate)
            enriched.factual_counterfactual = factual_pairs

            # Score crate
            score = self.score_crate(crate)
            enriched.score = score

        return enriched

    def summarize_features(self, crate: CrateMetadata) -> str:
        """Summarize crate features using LLM"""
        prompt = f"""
        Summarize the key features and capabilities of the Rust crate '{crate.name}' based on its README.

        Crate: {crate.name} v{crate.version}
        Description: {crate.description}
        Keywords: {', '.join(crate.keywords)}
        Categories: {', '.join(crate.categories)}

        README Content:
        {self.smart_truncate(crate.readme, 2000)}

        Provide a concise summary (2-3 sentences) of what this crate does and its main features.
        """

        result = self.call_llm(
            self.simplify_prompt(prompt),
            temperature=0.3,
            max_tokens=150,
            system_message="You are an expert Rust developer who summarizes crate features concisely."
        )

        return self.clean_output(result or "Unable to summarize features", "general")

    def classify_use_case(self, crate: CrateMetadata, readme_summary: str) -> str:
        """Classify the primary use case of the crate"""
        prompt = f"""
        Classify the primary use case of the Rust crate '{crate.name}' into one of these categories:
        - AI: Machine learning, AI, or data science related
        - Database: Database drivers, ORMs, or data storage
        - Web Framework: Web servers, HTTP, or web development
        - Networking: Network protocols, communication, or system networking
        - Serialization: Data serialization, deserialization, or format handling
        - Utilities: General utilities, helpers, or tools
        - DevTools: Development tools, testing, or debugging
        - ML: Machine learning specific (subset of AI)
        - Cryptography: Security, encryption, or cryptographic operations
        - Unknown: If none of the above categories fit

        Crate: {crate.name} v{crate.version}
        Description: {crate.description}
        Summary: {readme_summary}
        Keywords: {', '.join(crate.keywords)}
        Categories: {', '.join(crate.categories)}

        Respond with only the category name.
        """

        result = self.validate_and_retry(
            self.simplify_prompt(prompt),
            self.validate_classification,
            temperature=0.1,
            max_tokens=50,
            system_message="You are a Rust ecosystem expert who classifies crates accurately."
        )

        return self.clean_output(result or "Unknown", "classification")

    def generate_factual_pairs(self, crate: CrateMetadata) -> str:
        """Generate factual and counterfactual statements about the crate"""
        prompt = f"""
        Generate 2-3 pairs of factual and counterfactual statements about the Rust crate '{crate.name}'.

        Crate: {crate.name} v{crate.version}
        Description: {crate.description}
        Keywords: {', '.join(crate.keywords)}

        README Content:
        {self.smart_truncate(crate.readme, 1500)}

        For each pair:
        - ✅ Factual: A true statement about the crate's capabilities or features
        - ❌ Counterfactual: A false statement that sounds plausible but is incorrect

        Format each pair as:
        ✅ Factual: [true statement]
        ❌ Counterfactual: [false statement]

        Focus on technical capabilities, performance characteristics, and use cases.
        """

        result = self.validate_and_retry(
            self.simplify_prompt(prompt),
            self.validate_factual_pairs,
            temperature=0.4,
            max_tokens=300,
            system_message="You are a Rust expert who generates accurate factual statements and plausible counterfactuals."
        )

        return self.clean_output(result or "Unable to generate factual pairs", "factual_pairs")

    def score_crate(self, crate: CrateMetadata) -> float:
        """Score the crate based on various factors"""
        prompt = f"""
        Rate the Rust crate '{crate.name}' on a scale of 0.0 to 10.0 based on:
        - Documentation quality (README, examples)
        - Feature completeness
        - Community adoption (downloads, stars)
        - Code quality indicators
        - Practical usefulness

        Crate: {crate.name} v{crate.version}
        Description: {crate.description}
        Downloads: {crate.downloads}
        GitHub Stars: {crate.github_stars}
        Keywords: {', '.join(crate.keywords)}

        README Content:
        {self.smart_truncate(crate.readme, 1000)}

        Respond with only a number between 0.0 and 10.0 (e.g., 7.5).
        """

        result = self.call_llm(
            self.simplify_prompt(prompt),
            temperature=0.2,
            max_tokens=10,
            system_message="You are a Rust ecosystem expert who rates crates objectively."
        )

        if result:
            try:
                # Extract numeric score
                score_match = re.search(r'(\d+\.?\d*)', result)
                if score_match:
                    score = float(score_match.group(1))
                    return max(0.0, min(10.0, score))  # Clamp between 0-10
            except (ValueError, TypeError):
                pass

        return 5.0  # Default score

    def batch_process_prompts(
        self,
        prompts: "List[Tuple[str, float, int]]",
        batch_size: int = 4
    ) -> List[Optional[str]]:
        """Process multiple prompts in batches"""
        results = []

        for i in range(0, len(prompts), batch_size):
            batch = prompts[i:i + batch_size]
            batch_results = []

            for prompt, temp, tokens in batch:
                result = self.call_llm(prompt, float(temp), int(tokens))
                batch_results.append(result)

            results.extend(batch_results)

            # Small delay between batches
            if i + batch_size < len(prompts):
                time.sleep(0.5)

        return results

    def smart_context_management(
        self, context_history: List[str], new_prompt: str
    ) -> str:
        """Manage context intelligently to avoid token limits"""
        # Simple context management - could be enhanced
        total_context = "\n".join(context_history[-3:]) + "\n" + new_prompt
        return self.smart_truncate(total_context, 3000)


def create_llm_processor_from_config(pipeline_config: PipelineConfig) -> UnifiedLLMProcessor:
    """Create LLM processor from pipeline configuration"""

    # Determine which provider to use based on config
    if pipeline_config.use_azure_openai:
        llm_config = LLMConfig(
            provider="azure",
            model=pipeline_config.azure_openai_deployment_name,
            api_base=pipeline_config.azure_openai_endpoint,
            api_key=pipeline_config.azure_openai_api_key,
            azure_deployment=pipeline_config.azure_openai_deployment_name,
            azure_api_version=pipeline_config.azure_openai_api_version,
            temperature=0.2,
            max_tokens=pipeline_config.max_tokens,
            timeout=30,
            max_retries=pipeline_config.max_retries
        )
    else:
        # Default to local model
        llm_config = LLMConfig(
            provider="ollama",  # Default local provider
            model="llama2",  # Default model
            temperature=0.2,
            max_tokens=pipeline_config.max_tokens,
            timeout=30,
            max_retries=pipeline_config.max_retries
        )

    return UnifiedLLMProcessor(llm_config)


def create_llm_processor_from_args(
    provider: str,
    model: str,
    api_base: Optional[str] = None,
    api_key: Optional[str] = None,
    temperature: float = 0.2,
    max_tokens: int = 256,
    **kwargs
) -> UnifiedLLMProcessor:
    """Create LLM processor from command line arguments"""

    llm_config = LLMConfig(
        provider=provider,
        model=model,
        api_base=api_base,
        api_key=api_key,
        temperature=temperature,
        max_tokens=max_tokens,
        **kwargs
    )

    return UnifiedLLMProcessor(llm_config)
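Usage note (not part of the published wheel): the new module is driven through the factory helpers above. A minimal sketch of exercising it against a local Ollama server, assuming litellm is installed, Ollama is listening on its default port, and a model tagged "llama2" has been pulled; the prompt text is illustrative only:

from rust_crate_pipeline.unified_llm_processor import create_llm_processor_from_args

# Hypothetical local setup: default Ollama endpoint, "llama2" model.
processor = create_llm_processor_from_args(
    provider="ollama",
    model="llama2",
    temperature=0.2,
    max_tokens=256,
)

# call_llm returns None on failure, so guard the result before using it.
summary = processor.call_llm(
    "In one sentence, what does the Rust crate 'serde' do?",
    max_tokens=64,
)
print(summary or "LLM call failed")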