nc1709-1.15.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
nc1709/performance/pipeline.py
@@ -0,0 +1,611 @@
+"""
+NC1709 Performance - Parallel Processing Pipeline
+
+Runs independent operations concurrently instead of sequentially.
+Cache lookup, intent analysis, and context building all happen
+simultaneously, with cache hits short-circuiting the pipeline.
+
+Before (Sequential):
+    Cache → Intent → Context → Generate = 500ms + 200ms + 300ms + 4000ms = 5000ms
+
+After (Parallel):
+    ┌─ Cache lookup ─────┐
+    │                    │
+    ├─ Intent analysis ──┼─→ Short-circuit if cache hit
+    │                    │
+    └─ Context building ─┘
+
+    Generate (only if cache miss)
+
+    = max(500ms, 200ms, 300ms) + 4000ms = 4500ms (and a cache hit skips generation entirely)
+"""
+
+import asyncio
+import logging
+import time
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Any, Callable, Tuple, Coroutine
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from enum import Enum
+import threading
+
+logger = logging.getLogger(__name__)
+
+
+class PipelineStage(Enum):
+    """Stages in the processing pipeline"""
+    CACHE_LOOKUP = "cache_lookup"
+    INTENT_ANALYSIS = "intent_analysis"
+    CONTEXT_BUILDING = "context_building"
+    TIER_SELECTION = "tier_selection"
+    GENERATION = "generation"
+    POST_PROCESS = "post_process"
+
+
+@dataclass
+class StageResult:
+    """Result from a pipeline stage"""
+    stage: PipelineStage
+    success: bool
+    result: Any
+    duration_ms: float
+    error: Optional[str] = None
+
+    @property
+    def failed(self) -> bool:
+        return not self.success
+
+
+@dataclass
+class PipelineResult:
+    """Final result from pipeline execution"""
+    response: Optional[str]
+    stage_results: Dict[PipelineStage, StageResult]
+    total_duration_ms: float
+    cache_hit: bool
+    tier_used: Optional[str] = None
+
+    @property
+    def success(self) -> bool:
+        return self.response is not None
+
+    def get_stage_timing(self) -> Dict[str, float]:
+        """Get timing for each stage"""
+        return {
+            stage.value: result.duration_ms
+            for stage, result in self.stage_results.items()
+        }
+
+
+@dataclass
+class PipelineStats:
+    """Statistics for pipeline execution"""
+    total_executions: int = 0
+    cache_hits: int = 0
+    parallel_time_saved_ms: float = 0
+    stage_times: Dict[str, List[float]] = field(default_factory=lambda: {
+        stage.value: [] for stage in PipelineStage
+    })
+
+    def record_execution(
+        self,
+        result: PipelineResult,
+        sequential_estimate_ms: float
+    ):
+        """Record a pipeline execution"""
+        self.total_executions += 1
+        if result.cache_hit:
+            self.cache_hits += 1
+
+        # Track time saved by parallel execution
+        if result.total_duration_ms < sequential_estimate_ms:
+            self.parallel_time_saved_ms += sequential_estimate_ms - result.total_duration_ms
+
+        # Track stage timings
+        for stage, stage_result in result.stage_results.items():
+            self.stage_times[stage.value].append(stage_result.duration_ms)
+
+    def get_avg_stage_times(self) -> Dict[str, float]:
+        """Get average time for each stage"""
+        return {
+            stage: sum(times) / len(times) if times else 0
+            for stage, times in self.stage_times.items()
+        }
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "total_executions": self.total_executions,
+            "cache_hits": self.cache_hits,
+            "cache_hit_rate": self.cache_hits / self.total_executions if self.total_executions > 0 else 0,
+            "parallel_time_saved_ms": round(self.parallel_time_saved_ms, 2),
+            "avg_stage_times": {k: round(v, 2) for k, v in self.get_avg_stage_times().items()},
+        }
+
+
+class ParallelPipeline:
+    """
+    Parallel processing pipeline for request handling.
+
+    Executes independent stages concurrently and short-circuits
+    on cache hits to minimize latency.
+
+    Usage:
+        pipeline = ParallelPipeline(
+            cache=layered_cache,
+            intent_analyzer=analyzer,
+            context_engine=context_engine,
+            tier_orchestrator=orchestrator,
+        )
+
+        result = await pipeline.process(
+            prompt="explain decorators in Python",
+            context={"files": ["main.py"]}
+        )
+
+        if result.cache_hit:
+            print("Instant response from cache!")
+        print(result.response)
+    """
+
+    def __init__(
+        self,
+        cache=None,
+        intent_analyzer=None,
+        context_engine=None,
+        tier_orchestrator=None,
+        llm_adapter=None,
+        max_workers: int = 4,
+        enable_parallel: bool = True
+    ):
+        self.cache = cache
+        self.intent_analyzer = intent_analyzer
+        self.context_engine = context_engine
+        self.tier_orchestrator = tier_orchestrator
+        self.llm_adapter = llm_adapter
+
+        self.max_workers = max_workers
+        self.enable_parallel = enable_parallel
+        self.stats = PipelineStats()
+
+        # Thread pool for sync operations
+        self._executor = ThreadPoolExecutor(max_workers=max_workers)
+
+    async def process(
+        self,
+        prompt: str,
+        context: Optional[Dict[str, Any]] = None,
+        force_no_cache: bool = False
+    ) -> PipelineResult:
+        """
+        Process a request through the parallel pipeline.
+
+        Args:
+            prompt: User's prompt
+            context: Additional context
+            force_no_cache: Skip cache lookup
+
+        Returns:
+            PipelineResult with response and timing info
+        """
+        start_time = time.time()
+        context = context or {}
+        stage_results: Dict[PipelineStage, StageResult] = {}
+
+        # Phase 1: Parallel initial stages
+        # These are independent and can run concurrently
+        if self.enable_parallel:
+            phase1_results = await self._run_parallel_phase1(
+                prompt, context, force_no_cache
+            )
+        else:
+            phase1_results = await self._run_sequential_phase1(
+                prompt, context, force_no_cache
+            )
+
+        stage_results.update(phase1_results)
+
+        # Check for cache hit (short-circuit)
+        cache_result = stage_results.get(PipelineStage.CACHE_LOOKUP)
+        if cache_result and cache_result.success and cache_result.result:
+            cache_data = cache_result.result
+            if cache_data.get("hit"):
+                total_ms = (time.time() - start_time) * 1000
+
+                # Record stats
+                result = PipelineResult(
+                    response=cache_data["response"],
+                    stage_results=stage_results,
+                    total_duration_ms=total_ms,
+                    cache_hit=True,
+                    tier_used="cache",
+                )
+                self.stats.record_execution(result, self._estimate_sequential_time())
+                return result
+
+        # Phase 2: Tier selection (depends on intent)
+        intent_result = stage_results.get(PipelineStage.INTENT_ANALYSIS)
+        intent_data = intent_result.result if intent_result and intent_result.success else None
+
+        tier_result = await self._run_stage(
+            PipelineStage.TIER_SELECTION,
+            self._select_tier,
+            prompt, intent_data
+        )
+        stage_results[PipelineStage.TIER_SELECTION] = tier_result
+
+        # Phase 3: Generate response
+        context_result = stage_results.get(PipelineStage.CONTEXT_BUILDING)
+        context_data = context_result.result if context_result and context_result.success else None
+        tier_data = tier_result.result if tier_result.success else None
+
+        gen_result = await self._run_stage(
+            PipelineStage.GENERATION,
+            self._generate_response,
+            prompt, tier_data, context_data, intent_data
+        )
+        stage_results[PipelineStage.GENERATION] = gen_result
+
+        # Phase 4: Post-process and cache
+        response = gen_result.result if gen_result.success else None
+
+        if response and self.cache:
+            post_result = await self._run_stage(
+                PipelineStage.POST_PROCESS,
+                self._post_process,
+                prompt, context, response, tier_data
+            )
+            stage_results[PipelineStage.POST_PROCESS] = post_result
+
+        total_ms = (time.time() - start_time) * 1000
+
+        result = PipelineResult(
+            response=response,
+            stage_results=stage_results,
+            total_duration_ms=total_ms,
+            cache_hit=False,
+            tier_used=tier_data.get("model") if tier_data else None,
+        )
+
+        self.stats.record_execution(result, self._estimate_sequential_time())
+        return result
+
+    async def _run_parallel_phase1(
+        self,
+        prompt: str,
+        context: Dict[str, Any],
+        force_no_cache: bool
+    ) -> Dict[PipelineStage, StageResult]:
+        """Run Phase 1 stages in parallel"""
+        tasks = []
+
+        # Cache lookup
+        if not force_no_cache and self.cache:
+            tasks.append(
+                self._run_stage(
+                    PipelineStage.CACHE_LOOKUP,
+                    self._lookup_cache,
+                    prompt, context
+                )
+            )
+
+        # Intent analysis
+        if self.intent_analyzer:
+            tasks.append(
+                self._run_stage(
+                    PipelineStage.INTENT_ANALYSIS,
+                    self._analyze_intent,
+                    prompt, context
+                )
+            )
+
+        # Context building
+        if self.context_engine:
+            tasks.append(
+                self._run_stage(
+                    PipelineStage.CONTEXT_BUILDING,
+                    self._build_context,
+                    prompt, context
+                )
+            )
+
+        # Run all tasks concurrently
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        stage_results = {}
+        for result in results:
+            if isinstance(result, StageResult):
+                stage_results[result.stage] = result
+            elif isinstance(result, Exception):
+                logger.error(f"Stage failed with exception: {result}")
+
+        return stage_results
+
+    async def _run_sequential_phase1(
+        self,
+        prompt: str,
+        context: Dict[str, Any],
+        force_no_cache: bool
+    ) -> Dict[PipelineStage, StageResult]:
+        """Run Phase 1 stages sequentially (for comparison/fallback)"""
+        stage_results = {}
+
+        # Cache lookup first (can short-circuit)
+        if not force_no_cache and self.cache:
+            result = await self._run_stage(
+                PipelineStage.CACHE_LOOKUP,
+                self._lookup_cache,
+                prompt, context
+            )
+            stage_results[PipelineStage.CACHE_LOOKUP] = result
+
+            # Short-circuit on cache hit
+            if result.success and result.result and result.result.get("hit"):
+                return stage_results
+
+        # Intent analysis
+        if self.intent_analyzer:
+            result = await self._run_stage(
+                PipelineStage.INTENT_ANALYSIS,
+                self._analyze_intent,
+                prompt, context
+            )
+            stage_results[PipelineStage.INTENT_ANALYSIS] = result
+
+        # Context building
+        if self.context_engine:
+            result = await self._run_stage(
+                PipelineStage.CONTEXT_BUILDING,
+                self._build_context,
+                prompt, context
+            )
+            stage_results[PipelineStage.CONTEXT_BUILDING] = result
+
+        return stage_results
+
+    async def _run_stage(
+        self,
+        stage: PipelineStage,
+        func: Callable,
+        *args, **kwargs
+    ) -> StageResult:
+        """Run a single pipeline stage with timing"""
+        start = time.time()
+
+        try:
+            # Check if function is async
+            if asyncio.iscoroutinefunction(func):
+                result = await func(*args, **kwargs)
+            else:
+                # Run sync function in thread pool
+                loop = asyncio.get_event_loop()
+                result = await loop.run_in_executor(
+                    self._executor,
+                    lambda: func(*args, **kwargs)
+                )
+
+            duration = (time.time() - start) * 1000
+
+            return StageResult(
+                stage=stage,
+                success=True,
+                result=result,
+                duration_ms=duration,
+            )
+
+        except Exception as e:
+            duration = (time.time() - start) * 1000
+            logger.error(f"Stage {stage.value} failed: {e}")
+
+            return StageResult(
+                stage=stage,
+                success=False,
+                result=None,
+                duration_ms=duration,
+                error=str(e),
+            )
+
+    # Stage implementations
+
+    def _lookup_cache(self, prompt: str, context: Dict[str, Any]) -> Dict[str, Any]:
+        """Look up in cache"""
+        if not self.cache:
+            return {"hit": False}
+
+        from .cache import make_context_hash
+        context_hash = make_context_hash(context)
+
+        result = self.cache.get(prompt, context_hash)
+
+        return {
+            "hit": result.hit,
+            "response": result.response,
+            "level": result.level,
+            "similarity": result.similarity,
+        }
+
+    def _analyze_intent(self, prompt: str, context: Dict[str, Any]) -> Dict[str, Any]:
+        """Analyze user intent"""
+        if not self.intent_analyzer:
+            return {}
+
+        # Use sync method
+        if hasattr(self.intent_analyzer, 'analyze_sync'):
+            intent = self.intent_analyzer.analyze_sync(prompt)
+        else:
+            intent = self.intent_analyzer.analyze(prompt)
+
+        return {
+            "category": intent.primary_category.value if hasattr(intent.primary_category, 'value') else str(intent.primary_category),
+            "complexity": intent.complexity,
+            "confidence": intent.confidence,
+            "requires_context": intent.requires_context,
+        }
+
+    def _build_context(self, prompt: str, context: Dict[str, Any]) -> Dict[str, Any]:
+        """Build context for the request"""
+        if not self.context_engine:
+            return {}
+
+        # Get relevant context
+        target_files = context.get("target_files", [])
+
+        try:
+            built_context = self.context_engine.build_context_for_task(
+                task_description=prompt,
+                target_files=target_files,
+            )
+            return built_context or {}
+        except Exception as e:
+            logger.warning(f"Context building failed: {e}")
+            return {}
+
+    def _select_tier(
+        self,
+        prompt: str,
+        intent_data: Optional[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """Select model tier"""
+        if not self.tier_orchestrator:
+            return {"model": "default", "tier": "smart"}
+
+        category = intent_data.get("category") if intent_data else None
+        complexity = intent_data.get("complexity") if intent_data else None
+
+        decision = self.tier_orchestrator.select_tier(
+            prompt=prompt,
+            category=category,
+            complexity=complexity,
+        )
+
+        return {
+            "tier": decision.tier.value,
+            "model": decision.model,
+            "reasoning": decision.reasoning,
+            "confidence": decision.confidence,
+        }
+
+    def _generate_response(
+        self,
+        prompt: str,
+        tier_data: Optional[Dict[str, Any]],
+        context_data: Optional[Dict[str, Any]],
+        intent_data: Optional[Dict[str, Any]]
+    ) -> Optional[str]:
+        """Generate LLM response"""
+        if not self.llm_adapter:
+            # Mock response for testing
+            return f"[Mock response for: {prompt[:50]}...]"
+
+        model = tier_data.get("model") if tier_data else None
+
+        # Build enhanced prompt with context
+        enhanced_prompt = prompt
+        if context_data and context_data.get("summary"):
+            enhanced_prompt = f"Context: {context_data['summary']}\n\n{prompt}"
+
+        try:
+            response = self.llm_adapter.complete(
+                prompt=enhanced_prompt,
+                model=model,
+            )
+            return response
+        except Exception as e:
+            logger.error(f"Generation failed: {e}")
+            return None
+
+    def _post_process(
+        self,
+        prompt: str,
+        context: Dict[str, Any],
+        response: str,
+        tier_data: Optional[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """Post-process and cache response"""
+        if self.cache and response:
+            from .cache import make_context_hash
+            context_hash = make_context_hash(context)
+
+            model_used = tier_data.get("model", "unknown") if tier_data else "unknown"
+
+            self.cache.set(
+                prompt=prompt,
+                context_hash=context_hash,
+                response=response,
+                model_used=model_used,
+                tokens_saved=len(response.split()) * 2,  # Rough estimate
+            )
+
+        return {"cached": True}
+
+    def _estimate_sequential_time(self) -> float:
+        """Estimate time for sequential execution"""
+        # Based on typical stage times
+        return 5000  # 5 seconds
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get pipeline statistics"""
+        return self.stats.to_dict()
+
+    def shutdown(self):
+        """Shutdown the pipeline"""
+        self._executor.shutdown(wait=False)
+
+
+class SyncPipeline:
+    """
+    Synchronous wrapper for ParallelPipeline.
+
+    For use in non-async contexts.
+    """
+
+    def __init__(self, pipeline: ParallelPipeline):
+        self._pipeline = pipeline
+        self._loop = None
+
+    def process(
+        self,
+        prompt: str,
+        context: Optional[Dict[str, Any]] = None,
+        force_no_cache: bool = False
+    ) -> PipelineResult:
+        """Process synchronously"""
+        try:
+            # Try to get existing loop
+            loop = asyncio.get_event_loop()
+            if loop.is_running():
+                # We're in an async context, use run_coroutine_threadsafe
+                import concurrent.futures
+                future = asyncio.run_coroutine_threadsafe(
+                    self._pipeline.process(prompt, context, force_no_cache),
+                    loop
+                )
+                return future.result(timeout=120)
+            else:
+                return loop.run_until_complete(
+                    self._pipeline.process(prompt, context, force_no_cache)
+                )
+        except RuntimeError:
+            # No event loop, create one
+            return asyncio.run(
+                self._pipeline.process(prompt, context, force_no_cache)
+            )


+# Convenience functions
+def create_pipeline(
+    cache=None,
+    intent_analyzer=None,
+    context_engine=None,
+    tier_orchestrator=None,
+    llm_adapter=None,
+    **kwargs
+) -> ParallelPipeline:
+    """Create a configured pipeline"""
+    return ParallelPipeline(
+        cache=cache,
+        intent_analyzer=intent_analyzer,
+        context_engine=context_engine,
+        tier_orchestrator=tier_orchestrator,
+        llm_adapter=llm_adapter,
+        **kwargs
+    )
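
For orientation, a minimal usage sketch (an editor's illustration, not part of the package contents above). It exercises only the public surface declared in this diff — create_pipeline, ParallelPipeline.process, PipelineResult.get_stage_timing, and get_stats — and relies on the fallback behavior visible in the code: with no llm_adapter wired in, _generate_response returns a mock placeholder, and with no cache, intent analyzer, or context engine, Phase 1 simply schedules no tasks.

import asyncio

from nc1709.performance.pipeline import create_pipeline

async def main():
    # No collaborators wired in: Phase 1 produces no stage results,
    # tier selection falls back to {"model": "default", "tier": "smart"},
    # and generation returns the "[Mock response for: ...]" placeholder.
    pipeline = create_pipeline()
    result = await pipeline.process(prompt="explain decorators in Python")
    print(result.response)
    print(result.get_stage_timing())  # per-stage duration_ms
    print(pipeline.get_stats())       # cache hit rate, parallel time saved
    pipeline.shutdown()

asyncio.run(main())

In a synchronous caller, the same request can instead go through SyncPipeline(pipeline).process(...), which reuses a running event loop or creates one, as shown in the wrapper class above.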