mrmd-ai 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
mrmd_ai/juice.py CHANGED
@@ -34,6 +34,85 @@ class JuiceLevel(IntEnum):
     ULTIMATE = 4
 
 
+class ReasoningLevel(IntEnum):
+    """Independent reasoning/thinking budget control.
+
+    This is separate from JuiceLevel and controls how much "thinking"
+    the model does, independent of which model is selected.
+    """
+
+    # No extended thinking - fastest responses
+    OFF = 0
+
+    # Minimal reasoning
+    MINIMAL = 1
+
+    # Low reasoning effort
+    LOW = 2
+
+    # Medium reasoning effort
+    MEDIUM = 3
+
+    # High reasoning effort
+    HIGH = 4
+
+    # Maximum reasoning budget
+    MAXIMUM = 5
+
+
+# Map reasoning levels to thinking budgets and reasoning_effort values
+# For Anthropic: uses `thinking={"type": "enabled", "budget_tokens": X}`
+# For others: uses `reasoning_effort` ("low", "medium", "high")
+# Note: Anthropic requires max_tokens > thinking.budget_tokens
+REASONING_CONFIGS: dict[ReasoningLevel, dict] = {
+    ReasoningLevel.OFF: {
+        "budget_tokens": None,  # No thinking
+        "reasoning_effort": None,
+        "temperature": None,  # None means use model default
+    },
+    ReasoningLevel.MINIMAL: {
+        "budget_tokens": 1024,  # Minimum thinking budget
+        "reasoning_effort": "low",
+        "temperature": 1.0,  # Required for Anthropic extended thinking
+        "max_tokens": 4096,  # Must be > budget_tokens
+    },
+    ReasoningLevel.LOW: {
+        "budget_tokens": 4096,
+        "reasoning_effort": "low",
+        "temperature": 1.0,
+        "max_tokens": 8192,
+    },
+    ReasoningLevel.MEDIUM: {
+        "budget_tokens": 8192,
+        "reasoning_effort": "medium",
+        "temperature": 1.0,
+        "max_tokens": 16000,
+    },
+    ReasoningLevel.HIGH: {
+        "budget_tokens": 16384,
+        "reasoning_effort": "high",
+        "temperature": 1.0,
+        "max_tokens": 24000,
+    },
+    ReasoningLevel.MAXIMUM: {
+        "budget_tokens": 32768,  # Maximum thinking budget
+        "reasoning_effort": "high",
+        "temperature": 1.0,
+        "max_tokens": 48000,  # Must be > budget_tokens
+    },
+}
+
+
+REASONING_DESCRIPTIONS = {
+    ReasoningLevel.OFF: "Off - No extended thinking",
+    ReasoningLevel.MINIMAL: "Minimal - Light reasoning",
+    ReasoningLevel.LOW: "Low - Some reasoning",
+    ReasoningLevel.MEDIUM: "Medium - Moderate reasoning",
+    ReasoningLevel.HIGH: "High - Deep reasoning",
+    ReasoningLevel.MAXIMUM: "Maximum - Full reasoning budget",
+}
+
+
 @dataclass
 class ModelConfig:
     """Configuration for a model at a specific juice level."""
@@ -42,6 +121,7 @@ class ModelConfig:
     max_tokens: int = 4096
     reasoning_effort: str | None = None
     thinking: dict | None = None
+    supports_reasoning: bool = True  # Whether the model supports reasoning_effort
     extra_kwargs: dict = field(default_factory=dict)
 
     def to_lm_kwargs(self) -> dict:
@@ -60,28 +140,33 @@ class ModelConfig:
 
 
 # Model configurations for each juice level
+# supports_reasoning indicates if the model/provider supports reasoning_effort parameter
 JUICE_MODELS: dict[JuiceLevel, ModelConfig] = {
     JuiceLevel.QUICK: ModelConfig(
         model="groq/moonshotai/kimi-k2-instruct-0905",
         temperature=0.7,
         max_tokens=4096,
+        supports_reasoning=False,  # Groq does NOT support reasoning_effort
     ),
     JuiceLevel.BALANCED: ModelConfig(
         model="anthropic/claude-sonnet-4-5",
         temperature=0.7,
         max_tokens=4096,
+        supports_reasoning=True,  # Anthropic supports reasoning_effort
     ),
     JuiceLevel.DEEP: ModelConfig(
         model="gemini/gemini-3-pro-preview",
         temperature=1.0,
         max_tokens=16000,
         reasoning_effort="high",
+        supports_reasoning=True,  # Gemini supports reasoning_effort
     ),
     JuiceLevel.MAXIMUM: ModelConfig(
         model="anthropic/claude-opus-4-5",
         temperature=1.0,
         max_tokens=16000,
         reasoning_effort="high",
+        supports_reasoning=True,  # Anthropic supports reasoning_effort
     ),
 }
 
@@ -93,24 +178,28 @@ ULTIMATE_MODELS: list[ModelConfig] = [
         model="openrouter/x-ai/grok-4",
         temperature=0.7,
         max_tokens=8192,
+        supports_reasoning=True,  # Grok 4 supports reasoning
     ),
     ModelConfig(
-        model="openai/gpt-5.1",
+        model="openai/gpt-5.2",
         temperature=1.0,
         max_tokens=16000,
         reasoning_effort="high",
+        supports_reasoning=True,  # OpenAI supports reasoning
     ),
     ModelConfig(
         model="gemini/gemini-3-pro-preview",
         temperature=1.0,
         max_tokens=16000,
         reasoning_effort="high",
+        supports_reasoning=True,  # Gemini supports reasoning
     ),
     ModelConfig(
         model="anthropic/claude-opus-4-5",
         temperature=1.0,  # Must be 1 for extended thinking
         max_tokens=16000,
         reasoning_effort="high",
+        supports_reasoning=True,  # Anthropic supports reasoning
     ),
 ]
 
@@ -120,14 +209,19 @@ SYNTHESIZER_MODEL = ModelConfig(
     temperature=0.7,
     max_tokens=32000,
     reasoning_effort="high",
+    supports_reasoning=True,
 )
 
 
-def get_lm(juice: JuiceLevel | int = JuiceLevel.QUICK) -> dspy.LM:
-    """Get a dspy.LM configured for the specified juice level.
+def get_lm(
+    juice: JuiceLevel | int = JuiceLevel.QUICK,
+    reasoning: ReasoningLevel | int | None = None
+) -> dspy.LM:
+    """Get a dspy.LM configured for the specified juice and reasoning levels.
 
     Args:
         juice: Juice level (0-3). Level 4 (ULTIMATE) requires special handling.
+        reasoning: Optional reasoning level (0-5). If None, uses juice level's default.
 
     Returns:
         Configured dspy.LM instance.
@@ -139,20 +233,72 @@ def get_lm(juice: JuiceLevel | int = JuiceLevel.QUICK) -> dspy.LM:
         raise ValueError("ULTIMATE juice level requires multi-model merger. Use JuicedProgram instead.")
 
     config = JUICE_MODELS[juice]
-    return dspy.LM(**config.to_lm_kwargs())
+    kwargs = config.to_lm_kwargs()
+
+    # Apply reasoning level overrides if specified AND model supports reasoning
+    if reasoning is not None and config.supports_reasoning:
+        if isinstance(reasoning, int):
+            reasoning = ReasoningLevel(reasoning)
+
+        # Skip if reasoning is OFF
+        if reasoning == ReasoningLevel.OFF:
+            # Remove any existing reasoning params
+            kwargs.pop("reasoning_effort", None)
+            kwargs.pop("thinking", None)
+            return dspy.LM(**kwargs)
+
+        reasoning_config = REASONING_CONFIGS[reasoning]
+        model = config.model.lower()
+
+        # Determine provider and use appropriate parameter format
+        is_anthropic = "anthropic/" in model or "claude" in model
+        is_gemini = "gemini" in model
+        is_openai = "openai/" in model or "gpt" in model
+
+        # Apply temperature (required for Anthropic extended thinking)
+        if reasoning_config.get("temperature") is not None:
+            kwargs["temperature"] = reasoning_config["temperature"]
+
+        # Apply max_tokens
+        if reasoning_config.get("max_tokens") is not None:
+            kwargs["max_tokens"] = reasoning_config["max_tokens"]
+
+        if is_anthropic:
+            # Anthropic uses explicit thinking parameter with budget_tokens
+            budget = reasoning_config.get("budget_tokens", 1024)
+            kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget}
+            # Remove reasoning_effort if present (not used for thinking)
+            kwargs.pop("reasoning_effort", None)
+        else:
+            # Other providers use reasoning_effort
+            if reasoning_config["reasoning_effort"] is not None:
+                kwargs["reasoning_effort"] = reasoning_config["reasoning_effort"]
 
+    return dspy.LM(**kwargs)
 
-class SynthesizeResponses(dspy.Signature):
-    """Synthesize multiple AI model responses into an optimal final answer.
 
-    You are given the original input and responses from multiple AI models.
-    Analyze all responses, identify the best insights from each, resolve
-    any contradictions, and produce the ultimate synthesized response.
+class SynthesizeResponses(dspy.Signature):
+    """Synthesize multiple AI model responses into one optimal final answer.
+
+    You are given responses from multiple AI models for the same task.
+    Your job is to create the BEST possible response by:
+    1. Identifying the strongest elements from each model's response
+    2. Resolving any contradictions (prefer the most accurate/well-reasoned answer)
+    3. Combining complementary insights that don't conflict
+    4. Maintaining the original format and style expected for the task
+    5. Being concise - don't add unnecessary elaboration
+
+    For grammar/spelling fixes: Pick the most correct version, don't over-correct.
+    For text completion: Choose the most natural, coherent continuation.
+    For code: Select the cleanest, most idiomatic solution.
+    For lists: You may combine unique items if appropriate.
+
+    Output ONLY the synthesized response - no explanations or meta-commentary.
     """
 
-    original_input: str = dspy.InputField(desc="The original input/question")
-    model_responses: str = dspy.InputField(desc="Responses from multiple AI models, labeled by model name")
-    synthesized_response: str = dspy.OutputField(desc="The optimal synthesized response combining the best from all models")
+    original_input: str = dspy.InputField(desc="The original input/task that was given to all models")
+    model_responses: str = dspy.InputField(desc="Responses from multiple AI models, each labeled with model name")
+    synthesized_response: str = dspy.OutputField(desc="The single best response, synthesized from all model outputs. Output ONLY the response content.")
 
 
 class JuicedProgram:
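
Note: a hedged usage sketch for the new get_lm signature, based only on the tables above (not shipped in the package):

    from mrmd_ai.juice import JuiceLevel, ReasoningLevel, get_lm

    # BALANCED maps to anthropic/claude-sonnet-4-5, so HIGH should yield
    # thinking={"type": "enabled", "budget_tokens": 16384}, temperature=1.0,
    # and max_tokens=24000 per REASONING_CONFIGS.
    lm = get_lm(JuiceLevel.BALANCED, ReasoningLevel.HIGH)

    # QUICK maps to a Groq model with supports_reasoning=False, so the
    # reasoning argument is ignored and the level's defaults are used.
    lm_fast = get_lm(JuiceLevel.QUICK, ReasoningLevel.HIGH)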
@@ -166,6 +312,7 @@ class JuicedProgram:
         self,
         program: dspy.Module,
         juice: JuiceLevel | int = JuiceLevel.QUICK,
+        reasoning: ReasoningLevel | int | None = None,
         progress_callback: Callable[[str, dict], None] | None = None
     ):
         """Initialize a juiced program.
@@ -173,6 +320,7 @@ class JuicedProgram:
        Args:
            program: The DSPy program/module to wrap.
            juice: Juice level (0-4).
+           reasoning: Optional reasoning level (0-5). If None, uses juice level's default.
            progress_callback: Optional callback for progress events.
                Called with (event_type, data) where event_type is:
                - "status": General status update
@@ -181,6 +329,7 @@ class JuicedProgram:
         """
         self.program = program
         self.juice = JuiceLevel(juice) if isinstance(juice, int) else juice
+        self.reasoning = ReasoningLevel(reasoning) if isinstance(reasoning, int) else reasoning
         self.progress_callback = progress_callback
 
     def _emit(self, event_type: str, data: dict):
@@ -200,13 +349,18 @@ class JuicedProgram:
         config = JUICE_MODELS[self.juice]
         model_name = config.model.split("/")[-1]
 
+        reasoning_desc = ""
+        if self.reasoning is not None:
+            reasoning_desc = f" (reasoning={self.reasoning.name})"
+
         self._emit("status", {
             "step": "calling_model",
             "model": model_name,
-            "model_full": config.model
+            "model_full": config.model,
+            "reasoning_level": self.reasoning.value if self.reasoning else None,
         })
 
-        lm = get_lm(self.juice)
+        lm = get_lm(self.juice, self.reasoning)
         with dspy.context(lm=lm):
             result = self.program(**kwargs)
 
@@ -230,12 +384,39 @@ class JuicedProgram:
         self._emit("status", {
             "step": "starting_multi_model",
             "models": model_names,
-            "total": len(model_names)
+            "total": len(model_names),
+            "reasoning_level": self.reasoning.value if self.reasoning else None,
         })
 
         def run_model(config):
             """Run a single model - called in parallel."""
-            lm = dspy.LM(**config.to_lm_kwargs())
+            lm_kwargs = config.to_lm_kwargs()
+
+            # Apply reasoning level overrides if specified AND model supports reasoning
+            if self.reasoning is not None and self.reasoning != ReasoningLevel.OFF and config.supports_reasoning:
+                reasoning_config = REASONING_CONFIGS[self.reasoning]
+                model = config.model.lower()
+
+                # Determine provider
+                is_anthropic = "anthropic/" in model or "claude" in model
+
+                # Apply temperature and max_tokens
+                if reasoning_config.get("temperature") is not None:
+                    lm_kwargs["temperature"] = reasoning_config["temperature"]
+                if reasoning_config.get("max_tokens") is not None:
+                    lm_kwargs["max_tokens"] = reasoning_config["max_tokens"]
+
+                if is_anthropic:
+                    # Anthropic uses thinking parameter with budget_tokens
+                    budget = reasoning_config.get("budget_tokens", 1024)
+                    lm_kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget}
+                    lm_kwargs.pop("reasoning_effort", None)
+                else:
+                    # Other providers use reasoning_effort
+                    if reasoning_config["reasoning_effort"] is not None:
+                        lm_kwargs["reasoning_effort"] = reasoning_config["reasoning_effort"]
+
+            lm = dspy.LM(**lm_kwargs)
             model_name = config.model.split("/")[-1]
 
             # Emit model start
@@ -250,13 +431,19 @@ class JuicedProgram:
             with dspy.context(lm=lm):
                 result = self.program(**kwargs)
 
-            # Emit model complete
+            # Extract response text from DSPy Prediction for streaming
+            response_data = {}
+            if hasattr(result, "_store") and result._store:
+                response_data = dict(result._store)
+
+            # Emit model complete WITH the actual response
             with status_lock:
                 models_status[model_name] = "complete"
                 self._emit("model_complete", {
                     "model": model_name,
                     "success": True,
-                    "models_status": dict(models_status)
+                    "models_status": dict(models_status),
+                    "response": response_data,  # Include actual response!
                 })
 
             return {"model": model_name, "result": result, "error": None}
@@ -268,7 +455,8 @@ class JuicedProgram:
                     "model": model_name,
                     "success": False,
                     "error": str(e),
-                    "models_status": dict(models_status)
+                    "models_status": dict(models_status),
+                    "response": None,
                 })
             return {"model": model_name, "result": None, "error": str(e)}
 
@@ -285,15 +473,13 @@ class JuicedProgram:
             "models_completed": len([r for r in model_results if r["result"] is not None])
         })
 
-        # Merge results - combine outputs from all successful models
-        return self._merge_results(model_results)
+        # Merge results using AI synthesis
+        return self._merge_results(model_results, kwargs)
 
-    def _merge_results(self, model_results: list) -> Any:
-        """Merge results from multiple models into a single response.
+    def _merge_results(self, model_results: list, original_input: dict) -> Any:
+        """Merge results from multiple models using AI synthesis.
 
-        For list fields (like synonyms), combines unique values from all models.
-        For string fields, uses the first successful result.
-        Also includes individual model responses for transparency.
+        Uses SYNTHESIZER_MODEL to intelligently combine responses from all models.
         """
         # Get successful results
         successful = [r for r in model_results if r["result"] is not None]
@@ -302,23 +488,27 @@ class JuicedProgram:
             errors = [r["error"] for r in model_results if r["error"]]
             raise RuntimeError(f"All models failed: {errors}")
 
-        # Use first successful result as base
-        base_result = successful[0]["result"]
-
-        # Get the _store dict from the result (DSPy stores outputs there)
-        if hasattr(base_result, "_store"):
-            merged = dict(base_result._store)
-        else:
-            merged = {}
+        # If only one model succeeded, just return its result
+        if len(successful) == 1:
+            result = successful[0]["result"]
+            if hasattr(result, "_store"):
+                result._individual_responses = [{
+                    "model": successful[0]["model"],
+                    "response": str(result._store),
+                    "error": None
+                }]
+            return result
 
-        # Collect individual responses for display
+        # Collect individual responses
         individual_responses = []
+        model_outputs = {}  # model_name -> {field: value}
+
         for r in model_results:
             model_name = r["model"]
             if r["result"] is not None and hasattr(r["result"], "_store"):
-                # Extract the main output field (usually 'response', 'completion', etc.)
                 store = r["result"]._store
-                # Get the first string output field
+                model_outputs[model_name] = dict(store)
+                # Get main output text for display
                 output_text = None
                 for key, value in store.items():
                     if isinstance(value, str) and len(value) > 10:
@@ -336,26 +526,86 @@ class JuicedProgram:
                     "error": r["error"]
                 })
 
-        # Merge fields from other models
-        for r in successful[1:]:
-            result = r["result"]
-            if hasattr(result, "_store"):
-                store = result._store
-                for key, value in store.items():
-                    if key in merged:
-                        # Merge lists by combining unique values
-                        if isinstance(value, list) and isinstance(merged[key], list):
-                            # Combine and dedupe while preserving order
-                            seen = set(merged[key])
-                            for item in value:
-                                if item not in seen:
-                                    merged[key].append(item)
-                                    seen.add(item)
-                        # For strings, keep the first (base) value
-                        else:
-                            merged[key] = value
-
-        # Return a simple object with the merged data + individual responses
+        # Use first result as template for output fields
+        base_result = successful[0]["result"]
+        base_store = base_result._store if hasattr(base_result, "_store") else {}
+
+        # Format original input for synthesizer
+        input_text = self._format_input(original_input)
+
+        # Create synthesized result
+        merged = {}
+
+        # Configure synthesizer LM
+        synth_lm = dspy.LM(**SYNTHESIZER_MODEL.to_lm_kwargs())
+
+        # Synthesize each output field
+        for field_name, base_value in base_store.items():
+            # Collect this field's values from all models
+            field_values = {}
+            for model_name, outputs in model_outputs.items():
+                if field_name in outputs:
+                    field_values[model_name] = outputs[field_name]
+
+            if not field_values:
+                merged[field_name] = base_value
+                continue
+
+            # Check if it's a list field (like synonyms)
+            if isinstance(base_value, list):
+                # For lists, combine unique values from all models
+                combined = []
+                seen = set()
+                for model_name, values in field_values.items():
+                    if isinstance(values, list):
+                        for item in values:
+                            # Get hashable key for deduplication
+                            # Pydantic models aren't hashable, so convert to JSON
+                            try:
+                                if hasattr(item, 'model_dump_json'):
+                                    # Pydantic v2 model
+                                    item_key = item.model_dump_json()
+                                elif hasattr(item, 'json'):
+                                    # Pydantic v1 model
+                                    item_key = item.json()
+                                else:
+                                    # Regular hashable item
+                                    item_key = item
+                            except TypeError:
+                                # Fallback: convert to string representation
+                                item_key = str(item)
+
+                            if item_key not in seen:
+                                combined.append(item)
+                                seen.add(item_key)
+                merged[field_name] = combined
+            else:
+                # For string/text fields, use AI synthesis
+                responses_text = "\n\n".join([
+                    f"=== {model_name} ===\n{value}"
+                    for model_name, value in field_values.items()
+                ])
+
+                self._emit("status", {
+                    "step": "synthesizing_field",
+                    "field": field_name,
+                    "model": SYNTHESIZER_MODEL.model.split("/")[-1]
+                })
+
+                try:
+                    with dspy.context(lm=synth_lm):
+                        predictor = dspy.Predict(SynthesizeResponses)
+                        synth_result = predictor(
+                            original_input=input_text,
+                            model_responses=responses_text
+                        )
+                    merged[field_name] = synth_result.synthesized_response
+                except Exception as e:
+                    # Fallback to first model's response on synthesis error
+                    print(f"[Synthesis] Error synthesizing {field_name}: {e}")
+                    merged[field_name] = base_value
+
+        # Return a result object with merged data
         class MergedResult:
             pass
 
@@ -364,6 +614,7 @@ class JuicedProgram:
             setattr(result, key, value)
         result._store = merged  # For extract_result in server.py
         result._individual_responses = individual_responses  # For UI display
+        result._synthesized = True  # Mark as AI-synthesized
 
         return result
 
620
 
@@ -391,18 +642,24 @@ def juiced(juice: JuiceLevel | int = JuiceLevel.QUICK):
     return decorator
 
 
-def run_with_juice(program: dspy.Module, juice: JuiceLevel | int, **kwargs) -> Any:
+def run_with_juice(
+    program: dspy.Module,
+    juice: JuiceLevel | int,
+    reasoning: ReasoningLevel | int | None = None,
+    **kwargs
+) -> Any:
     """Convenience function to run a program with a specific juice level.
 
     Args:
         program: The DSPy program to run.
         juice: Juice level (0-4).
+        reasoning: Optional reasoning level (0-5). If None, uses juice level's default.
        **kwargs: Arguments to pass to the program.
 
     Returns:
         The program result.
     """
-    juiced_program = JuicedProgram(program, juice)
+    juiced_program = JuicedProgram(program, juice, reasoning=reasoning)
     return juiced_program(**kwargs)
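
Note: a usage sketch for the extended convenience function; the program and question are placeholders, not from the package:

    import dspy
    from mrmd_ai.juice import JuiceLevel, ReasoningLevel, run_with_juice

    program = dspy.Predict("question -> answer")  # stand-in dspy.Module
    result = run_with_juice(
        program,
        JuiceLevel.DEEP,
        reasoning=ReasoningLevel.HIGH,
        question="Why is the sky blue?",
    )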
mrmd_ai/modules/__init__.py CHANGED
@@ -38,6 +38,12 @@ from .document import (
 from .notebook import (
     NotebookNamePredict,
 )
+from .edit import (
+    EditAtCursorPredict,
+    AddressCommentPredict,
+    AddressAllCommentsPredict,
+    AddressNearbyCommentPredict,
+)
 
 __all__ = [
     # Finish programs
@@ -71,4 +77,9 @@ __all__ = [
     "DocumentAnalysisPredict",
     # Notebook programs
     "NotebookNamePredict",
+    # Edit programs (Ctrl-K and comments)
+    "EditAtCursorPredict",
+    "AddressCommentPredict",
+    "AddressAllCommentsPredict",
+    "AddressNearbyCommentPredict",
 ]
mrmd_ai/modules/edit.py ADDED
@@ -0,0 +1,102 @@
+"""Edit modules for cursor-based editing and comment processing."""
+
+import dspy
+from typing import List, Optional
+from ..signatures.edit import (
+    Edit,
+    CommentInfo,
+    EditAtCursorSignature,
+    AddressCommentSignature,
+    AddressAllCommentsSignature,
+    AddressNearbyCommentSignature,
+)
+
+
+class EditAtCursorPredict(dspy.Module):
+    """Execute user instructions via precise find/replace edits."""
+
+    def __init__(self):
+        super().__init__()
+        self.predict = dspy.Predict(EditAtCursorSignature)
+
+    def forward(
+        self,
+        text_before: str,
+        text_after: str,
+        selection: str,
+        full_document: str,
+        instruction: str,
+    ):
+        return self.predict(
+            text_before=text_before,
+            text_after=text_after,
+            selection=selection,
+            full_document=full_document,
+            instruction=instruction,
+        )
+
+
+class AddressCommentPredict(dspy.Module):
+    """Address a single comment embedded in the document."""
+
+    def __init__(self):
+        super().__init__()
+        self.predict = dspy.Predict(AddressCommentSignature)
+
+    def forward(
+        self,
+        full_document: str,
+        comment_text: str,
+        comment_context_before: str,
+        comment_context_after: str,
+        comment_raw: str,
+    ):
+        return self.predict(
+            full_document=full_document,
+            comment_text=comment_text,
+            comment_context_before=comment_context_before,
+            comment_context_after=comment_context_after,
+            comment_raw=comment_raw,
+        )
+
+
+class AddressAllCommentsPredict(dspy.Module):
+    """Address all comments in a document."""
+
+    def __init__(self):
+        super().__init__()
+        self.predict = dspy.Predict(AddressAllCommentsSignature)
+
+    def forward(
+        self,
+        full_document: str,
+        comments: List[CommentInfo],
+    ):
+        return self.predict(
+            full_document=full_document,
+            comments=comments,
+        )
+
+
+class AddressNearbyCommentPredict(dspy.Module):
+    """Address the comment nearest to the cursor."""
+
+    def __init__(self):
+        super().__init__()
+        self.predict = dspy.Predict(AddressNearbyCommentSignature)
+
+    def forward(
+        self,
+        full_document: str,
+        cursor_context_before: str,
+        cursor_context_after: str,
+        nearby_comment: CommentInfo,
+        nearby_comment_raw: str,
+    ):
+        return self.predict(
+            full_document=full_document,
+            cursor_context_before=cursor_context_before,
+            cursor_context_after=cursor_context_after,
+            nearby_comment=nearby_comment,
+            nearby_comment_raw=nearby_comment_raw,
+        )
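
Note: a sketch of driving the new module directly (normally the server wraps it in JuicedProgram); the LM name is an arbitrary placeholder:

    import dspy
    from mrmd_ai.modules.edit import EditAtCursorPredict

    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))  # any configured LM

    editor = EditAtCursorPredict()
    prediction = editor(
        text_before="def add(a, b):\n    ",
        text_after="\n",
        selection="",
        full_document="def add(a, b):\n    \n",
        instruction="insert the return statement",
    )
    print(prediction.edits)  # list[Edit] of find/replace operations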
mrmd_ai/server.py CHANGED
@@ -23,7 +23,7 @@ import json
 # Thread pool for running blocking DSPy calls
 _executor = ThreadPoolExecutor(max_workers=10)
 
-from .juice import JuiceLevel, JuicedProgram, get_lm, JUICE_MODELS
+from .juice import JuiceLevel, ReasoningLevel, JuicedProgram, get_lm, JUICE_MODELS, REASONING_DESCRIPTIONS
 from .modules import (
     # Finish
     FinishSentencePredict,
@@ -56,6 +56,11 @@ from .modules import (
     DocumentAnalysisPredict,
     # Notebook
     NotebookNamePredict,
+    # Edit (Ctrl-K and comments)
+    EditAtCursorPredict,
+    AddressCommentPredict,
+    AddressAllCommentsPredict,
+    AddressNearbyCommentPredict,
 )
 
 
@@ -92,21 +97,26 @@ PROGRAMS = {
     "DocumentAnalysisPredict": DocumentAnalysisPredict,
     # Notebook
     "NotebookNamePredict": NotebookNamePredict,
+    # Edit (Ctrl-K and comments)
+    "EditAtCursorPredict": EditAtCursorPredict,
+    "AddressCommentPredict": AddressCommentPredict,
+    "AddressAllCommentsPredict": AddressAllCommentsPredict,
+    "AddressNearbyCommentPredict": AddressNearbyCommentPredict,
 }
 
-# Cached program instances per juice level
-_program_cache: dict[tuple[str, int], JuicedProgram] = {}
+# Cached program instances per juice level and reasoning level
+_program_cache: dict[tuple[str, int, int | None], JuicedProgram] = {}
 
 
-def get_program(name: str, juice: int = 0) -> JuicedProgram:
-    """Get a JuicedProgram instance for the given program and juice level."""
-    cache_key = (name, juice)
+def get_program(name: str, juice: int = 0, reasoning: int | None = None) -> JuicedProgram:
+    """Get a JuicedProgram instance for the given program, juice level, and reasoning level."""
+    cache_key = (name, juice, reasoning)
     if cache_key not in _program_cache:
         if name not in PROGRAMS:
             raise ValueError(f"Unknown program: {name}")
         program_class = PROGRAMS[name]
         program = program_class()
-        _program_cache[cache_key] = JuicedProgram(program, juice=juice)
+        _program_cache[cache_key] = JuicedProgram(program, juice=juice, reasoning=reasoning)
     return _program_cache[cache_key]
 
 
@@ -153,12 +163,31 @@ async def list_programs():
 
 @app.get("/juice")
 async def get_juice_levels():
-    """Get available juice levels."""
-    from .juice import JUICE_DESCRIPTIONS
+    """Get available juice levels with their capabilities."""
+    from .juice import JUICE_DESCRIPTIONS, JUICE_MODELS, JuiceLevel
+    levels = []
+    for level, desc in JUICE_DESCRIPTIONS.items():
+        level_info = {
+            "level": level.value,
+            "description": desc,
+        }
+        # Add supports_reasoning for non-ULTIMATE levels
+        if level != JuiceLevel.ULTIMATE and level in JUICE_MODELS:
+            level_info["supports_reasoning"] = JUICE_MODELS[level].supports_reasoning
+        else:
+            # ULTIMATE level supports reasoning (all its sub-models do)
+            level_info["supports_reasoning"] = True
+        levels.append(level_info)
+    return {"levels": levels}
+
+
+@app.get("/reasoning")
+async def get_reasoning_levels():
+    """Get available reasoning levels."""
     return {
         "levels": [
             {"level": level.value, "description": desc}
-            for level, desc in JUICE_DESCRIPTIONS.items()
+            for level, desc in REASONING_DESCRIPTIONS.items()
         ]
     }
193
 
@@ -184,6 +213,7 @@ def extract_result(prediction: Any) -> dict:
         "reformatted_text", "text_to_replace", "replacement",
         "response", "summary", "analysis",  # Document-level fields
         "code",  # ProgramCodePredict output
+        "edits",  # EditAtCursor and AddressComment outputs
     ]
 
     for field in output_fields:
@@ -210,6 +240,16 @@ async def run_program(program_name: str, request: Request):
     except ValueError:
         juice_level = 0
 
+    # Get reasoning level from header (optional)
+    reasoning_header = request.headers.get("X-Reasoning-Level")
+    reasoning_level = None
+    if reasoning_header is not None:
+        try:
+            reasoning_level = int(reasoning_header)
+            reasoning_level = max(0, min(5, reasoning_level))  # Clamp to 0-5
+        except ValueError:
+            reasoning_level = None
+
     # Get request body
     try:
         params = await request.json()
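
Note: a client-side sketch of the new header. X-Reasoning-Level comes from this hunk; the juice header name, port, and request body are assumptions for illustration (the juice parsing happens above this hunk and is not shown in the diff):

    import requests

    resp = requests.post(
        "http://localhost:8000/FinishSentencePredict",
        json={"text": "The quick brown"},  # hypothetical params
        headers={
            "X-Juice-Level": "1",      # assumed header name
            "X-Reasoning-Level": "3",  # clamped server-side to 0-5
        },
    )
    print(resp.json().get("_reasoning_level"))  # echoes the applied level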
@@ -218,14 +258,17 @@ async def run_program(program_name: str, request: Request):
 
     # Get program
     try:
-        juiced_program = get_program(program_name, juice_level)
+        juiced_program = get_program(program_name, juice_level, reasoning_level)
     except ValueError as e:
         raise HTTPException(status_code=404, detail=str(e))
 
     # Log the call and get model info
-    from .juice import JUICE_DESCRIPTIONS, JUICE_MODELS, ULTIMATE_MODELS, JuiceLevel
+    from .juice import JUICE_DESCRIPTIONS, JUICE_MODELS, ULTIMATE_MODELS, JuiceLevel, ReasoningLevel
     juice_desc = JUICE_DESCRIPTIONS.get(JuiceLevel(juice_level), f"Level {juice_level}")
-    print(f"[AI] {program_name} @ {juice_desc}", flush=True)
+    reasoning_desc = ""
+    if reasoning_level is not None:
+        reasoning_desc = f" | {REASONING_DESCRIPTIONS.get(ReasoningLevel(reasoning_level), f'Reasoning {reasoning_level}')}"
+    print(f"[AI] {program_name} @ {juice_desc}{reasoning_desc}", flush=True)
 
     # Get the model name for this juice level
     if juice_level == JuiceLevel.ULTIMATE:
@@ -245,16 +288,35 @@ async def run_program(program_name: str, request: Request):
         # Add model metadata to response
         response["_model"] = model_name
         response["_juice_level"] = juice_level
-        return response
+        response["_reasoning_level"] = reasoning_level
+        # Serialize any Pydantic models to dicts for JSON compatibility
+        return serialize_for_json(response)
     except Exception as e:
         import traceback
         traceback.print_exc()
         raise HTTPException(status_code=500, detail=str(e))
 
 
+def serialize_for_json(obj):
+    """Recursively convert Pydantic models and other objects to JSON-serializable form."""
+    if hasattr(obj, 'model_dump'):
+        # Pydantic v2 model
+        return obj.model_dump()
+    elif hasattr(obj, 'dict'):
+        # Pydantic v1 model
+        return obj.dict()
+    elif isinstance(obj, dict):
+        return {k: serialize_for_json(v) for k, v in obj.items()}
+    elif isinstance(obj, (list, tuple)):
+        return [serialize_for_json(item) for item in obj]
+    else:
+        return obj
+
+
 def sse_event(event: str, data: dict) -> str:
     """Format a Server-Sent Event."""
-    return f"event: {event}\ndata: {json.dumps(data)}\n\n"
+    serialized = serialize_for_json(data)
+    return f"event: {event}\ndata: {json.dumps(serialized)}\n\n"
 
 
 @app.post("/{program_name}/stream")
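
Note: a quick check of the new serializer, assuming the Edit model added in mrmd_ai/signatures/edit.py below:

    from mrmd_ai.server import serialize_for_json
    from mrmd_ai.signatures.edit import Edit

    payload = {"edits": [Edit(find="foo", replace="bar")], "_model": "demo"}
    print(serialize_for_json(payload))
    # {'edits': [{'find': 'foo', 'replace': 'bar'}], '_model': 'demo'}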
@@ -275,6 +337,16 @@ async def run_program_stream(program_name: str, request: Request):
     except ValueError:
         juice_level = 0
 
+    # Get reasoning level from header (optional)
+    reasoning_header = request.headers.get("X-Reasoning-Level")
+    reasoning_level = None
+    if reasoning_header is not None:
+        try:
+            reasoning_level = int(reasoning_header)
+            reasoning_level = max(0, min(5, reasoning_level))  # Clamp to 0-5
+        except ValueError:
+            reasoning_level = None
+
     # Get request body
     try:
         params = await request.json()
@@ -286,9 +358,12 @@ async def run_program_stream(program_name: str, request: Request):
         raise HTTPException(status_code=404, detail=f"Unknown program: {program_name}")
 
     # Get model info
-    from .juice import JUICE_DESCRIPTIONS, JUICE_MODELS, ULTIMATE_MODELS, JuiceLevel, JuicedProgram
+    from .juice import JUICE_DESCRIPTIONS, JUICE_MODELS, ULTIMATE_MODELS, JuiceLevel, ReasoningLevel, JuicedProgram
     juice_desc = JUICE_DESCRIPTIONS.get(JuiceLevel(juice_level), f"Level {juice_level}")
-    print(f"[AI Stream] {program_name} @ {juice_desc}", flush=True)
+    reasoning_desc = ""
+    if reasoning_level is not None:
+        reasoning_desc = f" | {REASONING_DESCRIPTIONS.get(ReasoningLevel(reasoning_level), f'Reasoning {reasoning_level}')}"
+    print(f"[AI Stream] {program_name} @ {juice_desc}{reasoning_desc}", flush=True)
 
     # Get model name(s) for display
     if juice_level == JuiceLevel.ULTIMATE:
@@ -318,14 +393,16 @@ async def run_program_stream(program_name: str, request: Request):
         # Create program with progress callback
         program_class = PROGRAMS[program_name]
         program = program_class()
-        juiced = JuicedProgram(program, juice=juice_level, progress_callback=progress_callback)
+        juiced = JuicedProgram(program, juice=juice_level, reasoning=reasoning_level, progress_callback=progress_callback)
 
         # Emit starting event
         progress_callback("status", {
             "step": "starting",
             "model": model_name,
             "juice_level": juice_level,
-            "juice_name": juice_desc
+            "juice_name": juice_desc,
+            "reasoning_level": reasoning_level,
+            "reasoning_name": reasoning_desc.strip(" |") if reasoning_desc else None,
         })
 
         # Run the program
@@ -357,6 +434,7 @@ async def run_program_stream(program_name: str, request: Request):
                 response = extract_result(result_holder["result"])
                 response["_model"] = model_name
                 response["_juice_level"] = juice_level
+                response["_reasoning_level"] = reasoning_level
                 yield sse_event("result", response)
                 break
 
mrmd_ai/signatures/__init__.py CHANGED
@@ -14,6 +14,14 @@ from .correct import (
     CorrectAndFinishLineSignature,
     CorrectAndFinishSectionSignature,
 )
+from .edit import (
+    Edit,
+    CommentInfo,
+    EditAtCursorSignature,
+    AddressCommentSignature,
+    AddressAllCommentsSignature,
+    AddressNearbyCommentSignature,
+)
 
 __all__ = [
     "FinishSentenceSignature",
@@ -24,4 +32,11 @@ __all__ = [
     "FixTranscriptionSignature",
     "CorrectAndFinishLineSignature",
     "CorrectAndFinishSectionSignature",
+    # Edit signatures
+    "Edit",
+    "CommentInfo",
+    "EditAtCursorSignature",
+    "AddressCommentSignature",
+    "AddressAllCommentsSignature",
+    "AddressNearbyCommentSignature",
 ]
mrmd_ai/signatures/edit.py ADDED
@@ -0,0 +1,173 @@
+"""Signature definitions for cursor-based editing and comment processing."""
+
+import dspy
+from pydantic import BaseModel, Field
+from typing import List, Optional
+
+
+class Edit(BaseModel):
+    """A single find/replace edit operation.
+
+    For insertions at cursor, use find="" and the text will be inserted
+    at the cursor position.
+    """
+    find: str = Field(
+        description="Exact text to find in document. Use empty string for insertion at cursor."
+    )
+    replace: str = Field(
+        description="Text to replace the found text with, or text to insert if find is empty."
+    )
+
+
+class CommentInfo(BaseModel):
+    """Information about a comment in the document."""
+    text: str = Field(description="The comment text content")
+    context_before: str = Field(description="Text immediately before the comment")
+    context_after: str = Field(description="Text immediately after the comment")
+
+
+class EditAtCursorSignature(dspy.Signature):
+    """
+    Execute a user instruction by generating precise find/replace edits.
+
+    You are given the cursor context and a natural language instruction.
+    Generate a list of edits that implement the instruction.
+
+    CRITICAL RULES:
+    1. Each edit has `find` (exact text to locate) and `replace` (replacement text)
+    2. For INSERTIONS at cursor: use find="" - the replace text will be inserted at cursor
+    3. For MODIFICATIONS: find must match the EXACT text in the document (character-for-character)
+    4. find strings must be UNIQUE enough to match only the intended location
+    5. Include surrounding context in find to ensure uniqueness (e.g., "def process_data(items)" not just "process_data")
+    6. Edits are applied in order - earlier edits may shift positions of later ones
+
+    Examples:
+    - Instruction: "add a docstring" → find the function definition, replace with definition + docstring
+    - Instruction: "rename x to count" → find=" x " (with spaces), replace=" count "
+    - Instruction: "insert a comment here" → find="", replace="# comment\\n"
+    - Instruction: "delete this line" → find="the line content\\n", replace=""
+
+    When the user has selected text, that text is provided in `selection`.
+    Prefer to operate on the selection when it's relevant to the instruction.
+    """
+
+    text_before: str = dspy.InputField(
+        desc="Text immediately before the cursor (up to 500 characters for context)"
+    )
+    text_after: str = dspy.InputField(
+        desc="Text immediately after the cursor (up to 500 characters for context)"
+    )
+    selection: str = dspy.InputField(
+        desc="Currently selected text, or empty string if no selection"
+    )
+    full_document: str = dspy.InputField(
+        desc="The complete document content for full context"
+    )
+    instruction: str = dspy.InputField(
+        desc="User's natural language instruction for what to do"
+    )
+
+    edits: List[Edit] = dspy.OutputField(
+        desc="List of find/replace edits to apply. Order matters - applied sequentially."
+    )
+
+
+class AddressCommentSignature(dspy.Signature):
+    """
+    Address a single comment/instruction embedded in the document.
+
+    Comments are marked with <!--! comment text !--> syntax.
+    The comment contains instructions or notes that should be addressed.
+    Generate edits that fulfill the comment's request.
+
+    After addressing, you may optionally remove the comment marker itself.
+
+    Guidelines:
+    - Read the comment carefully to understand what's requested
+    - Look at the surrounding context to understand where changes should go
+    - Generate precise edits that address the comment
+    - If the comment asks for something that's already done, return empty edits
+    - Consider removing the comment after addressing it (include that as an edit)
+    """
+
+    full_document: str = dspy.InputField(
+        desc="The complete document content"
+    )
+    comment_text: str = dspy.InputField(
+        desc="The text content of the comment (without the <!--! !--> markers)"
+    )
+    comment_context_before: str = dspy.InputField(
+        desc="Text immediately before the comment marker"
+    )
+    comment_context_after: str = dspy.InputField(
+        desc="Text immediately after the comment marker"
+    )
+    comment_raw: str = dspy.InputField(
+        desc="The full raw comment including markers (e.g., '<!--! add error handling !-->')"
+    )
+
+    edits: List[Edit] = dspy.OutputField(
+        desc="List of find/replace edits to address the comment"
+    )
+
+
+class AddressAllCommentsSignature(dspy.Signature):
+    """
+    Address ALL comments/instructions in a document.
+
+    Scan the document for all <!--! ... !--> comment markers and generate
+    edits that address each one.
+
+    Guidelines:
+    - Process comments in document order (top to bottom)
+    - Each comment should be addressed appropriately
+    - Comments that conflict should be resolved sensibly
+    - After addressing, remove the comment markers
+    - Return all edits as a single list (they'll be applied in order)
+    """
+
+    full_document: str = dspy.InputField(
+        desc="The complete document content with embedded comments"
+    )
+    comments: List[CommentInfo] = dspy.InputField(
+        desc="List of all comments found in the document with their context"
+    )
+
+    edits: List[Edit] = dspy.OutputField(
+        desc="List of all find/replace edits to address all comments"
+    )
+
+
+class AddressNearbyCommentSignature(dspy.Signature):
+    """
+    Address the comment nearest to the cursor position.
+
+    Find the comment that's closest to where the user's cursor is and
+    generate edits to address that specific comment.
+
+    Guidelines:
+    - Focus only on the comment nearest to the cursor
+    - Use the cursor context to identify which comment is relevant
+    - Generate edits that address that comment
+    - Optionally remove the comment marker after addressing
+    """
+
+    full_document: str = dspy.InputField(
+        desc="The complete document content"
+    )
+    cursor_context_before: str = dspy.InputField(
+        desc="Text before the cursor position"
+    )
+    cursor_context_after: str = dspy.InputField(
+        desc="Text after the cursor position"
+    )
+    nearby_comment: CommentInfo = dspy.InputField(
+        desc="The comment closest to the cursor"
+    )
+    nearby_comment_raw: str = dspy.InputField(
+        desc="The full raw comment including markers"
+    )
+
+    edits: List[Edit] = dspy.OutputField(
+        desc="List of find/replace edits to address the nearby comment"
+    )
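
Note: the package does not ship an edit applicator (the editor front end applies the edits), so here is a hedged sketch of the find/replace semantics the Edit docstring describes; all names are hypothetical:

    def apply_edits(document: str, edits: list, cursor: int) -> str:
        """Apply Edit objects in order; empty `find` inserts at the cursor."""
        for edit in edits:
            if edit.find == "":
                document = document[:cursor] + edit.replace + document[cursor:]
                cursor += len(edit.replace)
            else:
                # find strings are supposed to be unique; replace first match
                document = document.replace(edit.find, edit.replace, 1)
        return document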
mrmd_ai-0.1.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mrmd-ai
-Version: 0.1.0
+Version: 0.1.1
 Summary: AI programs for MRMD editor - completions, fixes, and corrections
 Requires-Python: >=3.11
 Requires-Dist: dspy>=2.6
mrmd_ai-0.1.1.dist-info/RECORD CHANGED
@@ -1,26 +1,28 @@
 mrmd_ai/__init__.py,sha256=RocuoOEEJCUNinxqsXWjm_bIimCu8aDCDha-G5pU4fU,105
-mrmd_ai/juice.py,sha256=0_h7_M89IDhSt4iohdz0J5StcFXOaLF33d3fTxdBO-o,14442
-mrmd_ai/server.py,sha256=Jgl-bsQYbBMtv_tGu_2FwSN9BxgFpt-fmb9_pjIcqNw,14416
+mrmd_ai/juice.py,sha256=pCXFgAv_GZOYmIrhcsn-6qeqfolcexwbp3mr-Z34NQk,25235
+mrmd_ai/server.py,sha256=jQSX12LK0RiFSMbHp81zaDirbWD14CNT9S5lGPQKRNc,17961
 mrmd_ai/metrics/__init__.py,sha256=6BngKqh0a09phOzUdYeWjhsUXznCIx5jEjrEt7DIDu4,62
-mrmd_ai/modules/__init__.py,sha256=4V4IzurDZs_nhBTGlHyXco24iwEkum58kRNrKfm4IjQ,1793
+mrmd_ai/modules/__init__.py,sha256=OO-5alsFmR1bLtsvxAJ2wNO5nOtHRmWChTexEI6Y9xU,2097
 mrmd_ai/modules/code.py,sha256=8cK6LF0ZTrSp2srt1ltvUnmYjPlt5NZTj1yWctpJ7j0,4099
 mrmd_ai/modules/correct.py,sha256=TWnE1HD_Ip7xZ5yQwJi1n01tNXgBtYNvkTK--kAknak,1478
 mrmd_ai/modules/document.py,sha256=o6iLR2amscn-DHZ95JFQuEwhaj8cLs4bBISf7G9cT9Y,1106
+mrmd_ai/modules/edit.py,sha256=lGa0tNB7d9tRG4rtdQ6uNIi3lzSbC588c3r86ccPXZc,2736
 mrmd_ai/modules/finish.py,sha256=VtyE-45-8iM6iWjNg57wRWL0Ln3mSF0RcZq8CO5CoSk,2638
 mrmd_ai/modules/fix.py,sha256=fb4flKWyyyelheigeb1sI0dixd2scL9HZX-0_M_Uh-o,1506
 mrmd_ai/modules/notebook.py,sha256=w8Dg-NKVL6_kPOKkvb84kGrwgv5zHxvFNWBtXHLHww8,477
 mrmd_ai/modules/text.py,sha256=9MCO__EDalwi-iFf__sd8t7orsUy7WiBBf6Lp4bxxGE,2010
 mrmd_ai/optimizers/__init__.py,sha256=Ay6ZrQu8fLQaG7-dl6hTMruQY5AdGOT_YnlRhhZGgag,60
-mrmd_ai/signatures/__init__.py,sha256=wWT2D8beisIpYMPxN4j4JncQmUqcamD6v5BTKp2zFWc,655
+mrmd_ai/signatures/__init__.py,sha256=UoIplXOXxPidkaXi9u-7l2LziDJX2MN4PmwtXVsC04U,1013
 mrmd_ai/signatures/code.py,sha256=zBM_Nl2NImfOw49fVWCGlXcE_sm8xgWCN1ksDbEa6e8,11245
 mrmd_ai/signatures/correct.py,sha256=tIhYCONgGhuTV0eJCiLSXcGZSAEi06XY35ommtTTsRE,2920
 mrmd_ai/signatures/document.py,sha256=4Y-9SeXJGCq098Vy-PIbb_rexS2dYDlkU-kxnKAPVSU,1828
+mrmd_ai/signatures/edit.py,sha256=OBAYsh88Qg_EIoEJHHa28QRuSh2xbGM3OSGWtPJ8u_A,6524
 mrmd_ai/signatures/finish.py,sha256=x-ZB0U8GQJdNoGGO80FBOxHXjYsCmTFq9fnkXlHDeUY,5294
 mrmd_ai/signatures/fix.py,sha256=LJNvu9_XjPl90Wtt3xn6s-jGXA9GB5rdIL0MeFyRGtE,3042
 mrmd_ai/signatures/notebook.py,sha256=ZBioHA9ZTkLUD_UovdfiRYiDaUKuKOCDhiZP1NDFY8o,1226
 mrmd_ai/signatures/text.py,sha256=GhmFtEZqwivbevPI_NSBzh6AlH6JKLt2rA_LaYGK2lQ,5223
 mrmd_ai/utils/__init__.py,sha256=T4e9jmFWDSj1HOyz5_Qv-JQSC08GwT_9CACcAn37vWg,46
-mrmd_ai-0.1.0.dist-info/METADATA,sha256=4w17Do4YbBpwoT3PFN2i01zswTPJCVzR6rgib0zZXME,1167
-mrmd_ai-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-mrmd_ai-0.1.0.dist-info/entry_points.txt,sha256=Bq6nXiXxhNSPEgYWBgrrgJH1DeGjKM6hfCijcWClApw,55
-mrmd_ai-0.1.0.dist-info/RECORD,,
+mrmd_ai-0.1.1.dist-info/METADATA,sha256=_Vd_Hf2JHGDyX9ZtTd3ogma9NnwfDyml1mgTzpEHRQI,1167
+mrmd_ai-0.1.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+mrmd_ai-0.1.1.dist-info/entry_points.txt,sha256=Bq6nXiXxhNSPEgYWBgrrgJH1DeGjKM6hfCijcWClApw,55
+mrmd_ai-0.1.1.dist-info/RECORD,,