iints-sdk-python35 1.0.0__tar.gz → 1.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {iints_sdk_python35-1.0.0/src/iints_sdk_python35.egg-info → iints_sdk_python35-1.1.0}/PKG-INFO +58 -5
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/README.md +56 -3
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/pyproject.toml +2 -2
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/__init__.py +4 -0
- iints_sdk_python35-1.1.0/src/iints/ai/__init__.py +13 -0
- iints_sdk_python35-1.1.0/src/iints/ai/assistant.py +124 -0
- iints_sdk_python35-1.1.0/src/iints/ai/backends/__init__.py +11 -0
- iints_sdk_python35-1.1.0/src/iints/ai/backends/base.py +14 -0
- iints_sdk_python35-1.1.0/src/iints/ai/backends/mistral_api.py +17 -0
- iints_sdk_python35-1.1.0/src/iints/ai/backends/ollama.py +165 -0
- iints_sdk_python35-1.1.0/src/iints/ai/cli.py +269 -0
- iints_sdk_python35-1.1.0/src/iints/ai/mdmp_guard.py +112 -0
- iints_sdk_python35-1.1.0/src/iints/ai/prompts.py +74 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/clinical_benchmark.py +2 -2
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/cli/cli.py +2 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/adapter.py +4 -2
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/mdmp/__init__.py +4 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/mdmp/backend.py +51 -22
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0/src/iints_sdk_python35.egg-info}/PKG-INFO +58 -5
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints_sdk_python35.egg-info/SOURCES.txt +9 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints_sdk_python35.egg-info/requires.txt +1 -1
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/LICENSE +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/setup.cfg +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/algorithm_xray.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/baseline.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/clinical_metrics.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/clinical_tir_analyzer.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/diabetes_metrics.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/edge_efficiency.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/edge_performance_monitor.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/explainability.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/explainable_ai.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/hardware_benchmark.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/metrics.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/population_report.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/reporting.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/safety_index.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/sensor_filtering.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/analysis/validator.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/api/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/api/base_algorithm.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/api/registry.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/api/template_algorithm.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/assets/iints_logo.png +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/cli/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/battle_runner.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/correction_bolus.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/discovery.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/fixed_basal_bolus.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/hybrid_algorithm.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/lstm_algorithm.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/mock_algorithms.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/pid_controller.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/algorithms/standard_pump_algo.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/device.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/device_manager.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/devices/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/devices/models.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/patient/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/patient/bergman_model.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/patient/models.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/patient/patient_factory.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/patient/profile.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/safety/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/safety/config.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/safety/input_validator.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/safety/supervisor.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/simulation/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/simulation/scenario_parser.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/simulator.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/core/supervisor.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/column_mapper.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/contracts.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/datasets.json +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/demo/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/demo/demo_cgm.csv +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/guardians.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/importer.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/ingestor.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/mdmp_visualizer.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/nightscout.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/quality_checker.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/registry.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/runner.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/synthetic_mirror.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/tidepool.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/universal_parser.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/clinic_safe_baseline.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/clinic_safe_hyper_challenge.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/clinic_safe_hypo_prone.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/clinic_safe_midnight.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/clinic_safe_pizza.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/clinic_safe_stress_meal.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/default_patient.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/data/virtual_patients/patient_559_config.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/emulation/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/emulation/legacy_base.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/emulation/medtronic_780g.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/emulation/omnipod_5.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/emulation/tandem_controliq.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/highlevel.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/learning/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/learning/autonomous_optimizer.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/learning/learning_system.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/metrics.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/population/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/population/generator.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/population/runner.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/evidence_sources.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/forecast_calibration_profiles.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/golden_benchmark.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/presets.json +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/safety_contract_default.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/presets/validation_profiles.yaml +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/audit.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/calibration_gate.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/config.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/dataset.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/evaluation.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/losses.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/metrics.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/model_registry.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/research/predictor.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/scenarios/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/scenarios/generator.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/default_algorithm.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/scenarios/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/scenarios/chaos_insulin_stacking.json +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/scenarios/chaos_runaway_ai.json +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/scenarios/example_scenario.json +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/templates/scenarios/exercise_stress.json +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/utils/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/utils/plotting.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/utils/run_io.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/validation/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/validation/golden.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/validation/replay.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/validation/run_validation.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/validation/safety_contract.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/validation/schemas.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/visualization/__init__.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/visualization/cockpit.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints/visualization/uncertainty_cloud.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints_sdk_python35.egg-info/dependency_links.txt +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints_sdk_python35.egg-info/entry_points.txt +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/src/iints_sdk_python35.egg-info/top_level.txt +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/tests/test_bergman.py +0 -0
- {iints_sdk_python35-1.0.0 → iints_sdk_python35-1.1.0}/tests/test_population.py +0 -0
{iints_sdk_python35-1.0.0/src/iints_sdk_python35.egg-info → iints_sdk_python35-1.1.0}/PKG-INFO
RENAMED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: iints-sdk-python35
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.1.0
|
|
4
4
|
Summary: A pre-clinical Edge-AI SDK for diabetes management validation.
|
|
5
5
|
Author-email: Rune Bobbaers <rune.bobbaers@gmail.com>
|
|
6
6
|
Project-URL: Homepage, https://github.com/python35/IINTS-SDK
|
|
@@ -46,7 +46,7 @@ Requires-Dist: h5py>=3.10.0; extra == "research"
|
|
|
46
46
|
Requires-Dist: onnx>=1.16.0; extra == "research"
|
|
47
47
|
Requires-Dist: onnxscript>=0.1.0; extra == "research"
|
|
48
48
|
Provides-Extra: mdmp
|
|
49
|
-
Requires-Dist: mdmp-protocol>=0.
|
|
49
|
+
Requires-Dist: mdmp-protocol>=0.3.0; extra == "mdmp"
|
|
50
50
|
Dynamic: license-file
|
|
51
51
|
|
|
52
52
|
# IINTS-AF SDK
|
|
@@ -81,6 +81,44 @@ cd iints_quickstart
|
|
|
81
81
|
iints presets run --name baseline_t1d --algo algorithms/example_algorithm.py
|
|
82
82
|
```
|
|
83
83
|
|
|
84
|
+
## AI Assistant (Ministral via Ollama)
|
|
85
|
+
|
|
86
|
+
The SDK now includes a research-only AI assistant layer for explanations and run summaries.
|
|
87
|
+
It is gated by MDMP verification before any LLM call is allowed.
|
|
88
|
+
|
|
89
|
+
Use an active virtual environment for the full flow:
|
|
90
|
+
|
|
91
|
+
```bash
|
|
92
|
+
python3 -m venv .venv
|
|
93
|
+
source .venv/bin/activate
|
|
94
|
+
python -m pip install -U pip
|
|
95
|
+
python -m pip install -e ".[mdmp]"
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
Run Ministral locally with Ollama:
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
ollama pull mistral/ministral-8b-instruct
|
|
102
|
+
iints ai local-check --model ministral
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
Example commands:
|
|
106
|
+
|
|
107
|
+
```bash
|
|
108
|
+
iints ai explain results/step.json \
|
|
109
|
+
--mdmp-cert results/report.signed.mdmp
|
|
110
|
+
|
|
111
|
+
iints ai report results/simulation_run.json \
|
|
112
|
+
--mdmp-cert results/report.signed.mdmp \
|
|
113
|
+
--output results/ai_report.md
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
Notes:
|
|
117
|
+
- AI analysis is blocked if the MDMP artifact is invalid.
|
|
118
|
+
- Minimum required MDMP grade defaults to `research_grade`.
|
|
119
|
+
- Large JSON payloads are clipped automatically before prompt generation to keep local inference stable.
|
|
120
|
+
- Output is research-only and not medical advice.
|
|
121
|
+
|
|
84
122
|
## MDMP (Short)
|
|
85
123
|
MDMP is the data-quality protocol used by IINTS.
|
|
86
124
|
|
|
@@ -118,16 +156,31 @@ mdmp registry push --registry registry/mdmp_registry.json --report results/mdmp_
|
|
|
118
156
|
- MDMP repo: `python35/MDMP`
|
|
119
157
|
|
|
120
158
|
Local helper scripts:
|
|
121
|
-
- `tools/
|
|
122
|
-
- `tools/
|
|
159
|
+
- `tools/dev/dual_repo_status.sh`
|
|
160
|
+
- `tools/dev/dual_repo_commit_push.sh`
|
|
123
161
|
|
|
124
162
|
Full process: `docs/DUAL_REPO_WORKFLOW.md`
|
|
125
163
|
|
|
126
164
|
MDMP sync CI gate:
|
|
127
165
|
- `.github/workflows/mdmp-sync.yml`
|
|
128
|
-
-
|
|
166
|
+
- Uses private-repo checkout when `MDMP_REPO_TOKEN` is configured.
|
|
167
|
+
- Falls back to `mdmp-protocol` from PyPI when checkout is unavailable.
|
|
129
168
|
- Auto dependency updates for MDMP are handled via Dependabot (`.github/dependabot.yml`).
|
|
130
169
|
|
|
170
|
+
## Tools Layout
|
|
171
|
+
|
|
172
|
+
Repository helpers are now grouped by purpose:
|
|
173
|
+
|
|
174
|
+
- `scripts/`: simple user-facing shortcuts like test, lint, and demo entrypoints
|
|
175
|
+
- `tools/ci/`: CI gates and policy checks
|
|
176
|
+
- `tools/dev/`: maintainer workflows and multi-repo helpers
|
|
177
|
+
- `tools/docs/`: manual and documentation builders
|
|
178
|
+
- `tools/data/`: dataset import and conversion utilities
|
|
179
|
+
- `tools/analysis/`: plotting, diagnostics, and report helpers
|
|
180
|
+
- `tools/assets/`: branding and asset generation helpers
|
|
181
|
+
|
|
182
|
+
Reference: `tools/README.md`
|
|
183
|
+
|
|
131
184
|
## Typical Workflow
|
|
132
185
|
1. Prepare or import data.
|
|
133
186
|
2. Validate data with MDMP.
|
|
@@ -30,6 +30,44 @@ cd iints_quickstart
|
|
|
30
30
|
iints presets run --name baseline_t1d --algo algorithms/example_algorithm.py
|
|
31
31
|
```
|
|
32
32
|
|
|
33
|
+
## AI Assistant (Ministral via Ollama)
|
|
34
|
+
|
|
35
|
+
The SDK now includes a research-only AI assistant layer for explanations and run summaries.
|
|
36
|
+
It is gated by MDMP verification before any LLM call is allowed.
|
|
37
|
+
|
|
38
|
+
Use an active virtual environment for the full flow:
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
python3 -m venv .venv
|
|
42
|
+
source .venv/bin/activate
|
|
43
|
+
python -m pip install -U pip
|
|
44
|
+
python -m pip install -e ".[mdmp]"
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
Run Ministral locally with Ollama:
|
|
48
|
+
|
|
49
|
+
```bash
|
|
50
|
+
ollama pull mistral/ministral-8b-instruct
|
|
51
|
+
iints ai local-check --model ministral
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
Example commands:
|
|
55
|
+
|
|
56
|
+
```bash
|
|
57
|
+
iints ai explain results/step.json \
|
|
58
|
+
--mdmp-cert results/report.signed.mdmp
|
|
59
|
+
|
|
60
|
+
iints ai report results/simulation_run.json \
|
|
61
|
+
--mdmp-cert results/report.signed.mdmp \
|
|
62
|
+
--output results/ai_report.md
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
Notes:
|
|
66
|
+
- AI analysis is blocked if the MDMP artifact is invalid.
|
|
67
|
+
- Minimum required MDMP grade defaults to `research_grade`.
|
|
68
|
+
- Large JSON payloads are clipped automatically before prompt generation to keep local inference stable.
|
|
69
|
+
- Output is research-only and not medical advice.
|
|
70
|
+
|
|
33
71
|
## MDMP (Short)
|
|
34
72
|
MDMP is the data-quality protocol used by IINTS.
|
|
35
73
|
|
|
@@ -67,16 +105,31 @@ mdmp registry push --registry registry/mdmp_registry.json --report results/mdmp_
|
|
|
67
105
|
- MDMP repo: `python35/MDMP`
|
|
68
106
|
|
|
69
107
|
Local helper scripts:
|
|
70
|
-
- `tools/
|
|
71
|
-
- `tools/
|
|
108
|
+
- `tools/dev/dual_repo_status.sh`
|
|
109
|
+
- `tools/dev/dual_repo_commit_push.sh`
|
|
72
110
|
|
|
73
111
|
Full process: `docs/DUAL_REPO_WORKFLOW.md`
|
|
74
112
|
|
|
75
113
|
MDMP sync CI gate:
|
|
76
114
|
- `.github/workflows/mdmp-sync.yml`
|
|
77
|
-
-
|
|
115
|
+
- Uses private-repo checkout when `MDMP_REPO_TOKEN` is configured.
|
|
116
|
+
- Falls back to `mdmp-protocol` from PyPI when checkout is unavailable.
|
|
78
117
|
- Auto dependency updates for MDMP are handled via Dependabot (`.github/dependabot.yml`).
|
|
79
118
|
|
|
119
|
+
## Tools Layout
|
|
120
|
+
|
|
121
|
+
Repository helpers are now grouped by purpose:
|
|
122
|
+
|
|
123
|
+
- `scripts/`: simple user-facing shortcuts like test, lint, and demo entrypoints
|
|
124
|
+
- `tools/ci/`: CI gates and policy checks
|
|
125
|
+
- `tools/dev/`: maintainer workflows and multi-repo helpers
|
|
126
|
+
- `tools/docs/`: manual and documentation builders
|
|
127
|
+
- `tools/data/`: dataset import and conversion utilities
|
|
128
|
+
- `tools/analysis/`: plotting, diagnostics, and report helpers
|
|
129
|
+
- `tools/assets/`: branding and asset generation helpers
|
|
130
|
+
|
|
131
|
+
Reference: `tools/README.md`
|
|
132
|
+
|
|
80
133
|
## Typical Workflow
|
|
81
134
|
1. Prepare or import data.
|
|
82
135
|
2. Validate data with MDMP.
|
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "iints-sdk-python35"
|
|
7
|
-
version = "1.
|
|
7
|
+
version = "1.1.0"
|
|
8
8
|
authors = [
|
|
9
9
|
{ name="Rune Bobbaers", email="rune.bobbaers@gmail.com" },
|
|
10
10
|
]
|
|
@@ -60,7 +60,7 @@ research = [
|
|
|
60
60
|
"onnxscript>=0.1.0",
|
|
61
61
|
]
|
|
62
62
|
mdmp = [
|
|
63
|
-
"mdmp-protocol>=0.
|
|
63
|
+
"mdmp-protocol>=0.3.0",
|
|
64
64
|
]
|
|
65
65
|
|
|
66
66
|
[project.scripts]
|
|
@@ -61,6 +61,7 @@ from .data.synthetic_mirror import generate_synthetic_mirror, SyntheticMirrorArt
|
|
|
61
61
|
from .analysis.metrics import generate_benchmark_metrics # Added for benchmark
|
|
62
62
|
from .analysis.reporting import ClinicalReportGenerator
|
|
63
63
|
from .analysis.edge_efficiency import EnergyEstimate, estimate_energy_per_decision
|
|
64
|
+
from .ai import AIResponse, IINTSAssistant, MDMPGuard
|
|
64
65
|
from .highlevel import run_simulation, run_full, run_population
|
|
65
66
|
from .scenarios import ScenarioGeneratorConfig, generate_random_scenario
|
|
66
67
|
|
|
@@ -169,6 +170,9 @@ __all__ = [
|
|
|
169
170
|
"ClinicalReportGenerator",
|
|
170
171
|
"EnergyEstimate",
|
|
171
172
|
"estimate_energy_per_decision",
|
|
173
|
+
"AIResponse",
|
|
174
|
+
"IINTSAssistant",
|
|
175
|
+
"MDMPGuard",
|
|
172
176
|
# Reporting
|
|
173
177
|
"generate_report",
|
|
174
178
|
"generate_quickstart_report",
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from .assistant import AIResponse, IINTSAssistant
|
|
2
|
+
from .backends import DEFAULT_MINISTRAL_MODEL, DEFAULT_OLLAMA_HOST, OllamaBackend
|
|
3
|
+
from .mdmp_guard import GuardResult, MDMPGuard
|
|
4
|
+
|
|
5
|
+
__all__ = [
|
|
6
|
+
"AIResponse",
|
|
7
|
+
"IINTSAssistant",
|
|
8
|
+
"DEFAULT_MINISTRAL_MODEL",
|
|
9
|
+
"DEFAULT_OLLAMA_HOST",
|
|
10
|
+
"OllamaBackend",
|
|
11
|
+
"GuardResult",
|
|
12
|
+
"MDMPGuard",
|
|
13
|
+
]
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .backends import DEFAULT_MINISTRAL_MODEL, CompletionBackend, MistralAPIBackend, OllamaBackend
|
|
8
|
+
from .mdmp_guard import GuardResult, MDMPGuard
|
|
9
|
+
from .prompts import TaskName, build_prompt
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass(frozen=True)
|
|
13
|
+
class AIResponse:
|
|
14
|
+
task: str
|
|
15
|
+
text: str
|
|
16
|
+
backend: str
|
|
17
|
+
model: str
|
|
18
|
+
certification: GuardResult
|
|
19
|
+
|
|
20
|
+
def to_dict(self) -> dict[str, Any]:
|
|
21
|
+
return {
|
|
22
|
+
"task": self.task,
|
|
23
|
+
"text": self.text,
|
|
24
|
+
"backend": self.backend,
|
|
25
|
+
"model": self.model,
|
|
26
|
+
"certification": self.certification.to_dict(),
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class IINTSAssistant:
|
|
31
|
+
"""Research-only LLM assistant gated by MDMP certification."""
|
|
32
|
+
|
|
33
|
+
def __init__(
|
|
34
|
+
self,
|
|
35
|
+
mdmp_cert: str | Path,
|
|
36
|
+
*,
|
|
37
|
+
mode: str = "auto",
|
|
38
|
+
model: str = DEFAULT_MINISTRAL_MODEL,
|
|
39
|
+
minimum_grade: str = "research_grade",
|
|
40
|
+
public_key_path: str | Path | None = None,
|
|
41
|
+
trust_store_path: str | Path | None = None,
|
|
42
|
+
ollama_host: str | None = None,
|
|
43
|
+
timeout_seconds: float = 120.0,
|
|
44
|
+
backend: CompletionBackend | None = None,
|
|
45
|
+
guard: MDMPGuard | None = None,
|
|
46
|
+
) -> None:
|
|
47
|
+
self.guard = guard or MDMPGuard(
|
|
48
|
+
mdmp_cert,
|
|
49
|
+
minimum_grade=minimum_grade,
|
|
50
|
+
public_key_path=public_key_path,
|
|
51
|
+
trust_store_path=trust_store_path,
|
|
52
|
+
)
|
|
53
|
+
self.backend = backend or self._detect_backend(
|
|
54
|
+
mode=mode,
|
|
55
|
+
model=model,
|
|
56
|
+
ollama_host=ollama_host,
|
|
57
|
+
timeout_seconds=timeout_seconds,
|
|
58
|
+
)
|
|
59
|
+
|
|
60
|
+
def _detect_backend(
|
|
61
|
+
self,
|
|
62
|
+
*,
|
|
63
|
+
mode: str,
|
|
64
|
+
model: str,
|
|
65
|
+
ollama_host: str | None,
|
|
66
|
+
timeout_seconds: float,
|
|
67
|
+
) -> CompletionBackend:
|
|
68
|
+
requested = mode.strip().lower()
|
|
69
|
+
if requested in {"auto", "local", "ollama"}:
|
|
70
|
+
ollama_backend = OllamaBackend(
|
|
71
|
+
model_name=model,
|
|
72
|
+
base_url=ollama_host,
|
|
73
|
+
timeout_seconds=timeout_seconds,
|
|
74
|
+
)
|
|
75
|
+
local_backend: CompletionBackend = ollama_backend
|
|
76
|
+
if not ollama_backend.available():
|
|
77
|
+
raise RuntimeError(
|
|
78
|
+
"No local Ollama backend is available. "
|
|
79
|
+
f"Could not reach {ollama_backend.base_url}. "
|
|
80
|
+
"Start Ollama and try again."
|
|
81
|
+
)
|
|
82
|
+
ollama_backend.ensure_model_ready()
|
|
83
|
+
return local_backend
|
|
84
|
+
if requested == "api":
|
|
85
|
+
api_backend: CompletionBackend = MistralAPIBackend()
|
|
86
|
+
if api_backend.available():
|
|
87
|
+
return api_backend
|
|
88
|
+
raise RuntimeError(
|
|
89
|
+
"Cloud API fallback is not enabled in this SDK build yet. "
|
|
90
|
+
"Use mode='local' with Ollama."
|
|
91
|
+
)
|
|
92
|
+
raise ValueError(f"Unsupported AI mode: {mode}")
|
|
93
|
+
|
|
94
|
+
def _run_task(self, task: TaskName, payload: Any) -> AIResponse:
|
|
95
|
+
certification = self.guard.check()
|
|
96
|
+
system_prompt, user_prompt = build_prompt(task, payload)
|
|
97
|
+
text = self.guard.wrap(
|
|
98
|
+
self.backend.complete(system_prompt=system_prompt, user_prompt=user_prompt)
|
|
99
|
+
)
|
|
100
|
+
resolved_model = getattr(self.backend, "resolved_model_name", None)
|
|
101
|
+
response_model = (
|
|
102
|
+
str(resolved_model)
|
|
103
|
+
if isinstance(resolved_model, str) and resolved_model.strip()
|
|
104
|
+
else str(getattr(self.backend, "model_name", DEFAULT_MINISTRAL_MODEL))
|
|
105
|
+
)
|
|
106
|
+
return AIResponse(
|
|
107
|
+
task=task,
|
|
108
|
+
text=text,
|
|
109
|
+
backend=getattr(self.backend, "backend_name", type(self.backend).__name__),
|
|
110
|
+
model=response_model,
|
|
111
|
+
certification=certification,
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
def explain_decision(self, step: dict[str, Any]) -> AIResponse:
|
|
115
|
+
return self._run_task("explain_decision", step)
|
|
116
|
+
|
|
117
|
+
def analyze_trends(self, glucose_payload: list[Any] | dict[str, Any]) -> AIResponse:
|
|
118
|
+
return self._run_task("analyze_trends", glucose_payload)
|
|
119
|
+
|
|
120
|
+
def detect_anomalies(self, results: dict[str, Any]) -> AIResponse:
|
|
121
|
+
return self._run_task("detect_anomalies", results)
|
|
122
|
+
|
|
123
|
+
def generate_report(self, run: dict[str, Any]) -> AIResponse:
|
|
124
|
+
return self._run_task("generate_report", run)
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from .base import CompletionBackend
|
|
2
|
+
from .mistral_api import MistralAPIBackend
|
|
3
|
+
from .ollama import DEFAULT_MINISTRAL_MODEL, DEFAULT_OLLAMA_HOST, OllamaBackend
|
|
4
|
+
|
|
5
|
+
__all__ = [
|
|
6
|
+
"CompletionBackend",
|
|
7
|
+
"DEFAULT_MINISTRAL_MODEL",
|
|
8
|
+
"DEFAULT_OLLAMA_HOST",
|
|
9
|
+
"OllamaBackend",
|
|
10
|
+
"MistralAPIBackend",
|
|
11
|
+
]
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Protocol
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CompletionBackend(Protocol):
|
|
7
|
+
backend_name: str
|
|
8
|
+
model_name: str
|
|
9
|
+
|
|
10
|
+
def available(self) -> bool:
|
|
11
|
+
...
|
|
12
|
+
|
|
13
|
+
def complete(self, *, system_prompt: str, user_prompt: str) -> str:
|
|
14
|
+
...
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class MistralAPIBackend:
|
|
5
|
+
backend_name = "mistral_api"
|
|
6
|
+
|
|
7
|
+
def __init__(self, *args, **kwargs) -> None:
|
|
8
|
+
self.model_name = "mistral_api_unconfigured"
|
|
9
|
+
|
|
10
|
+
def available(self) -> bool:
|
|
11
|
+
return False
|
|
12
|
+
|
|
13
|
+
def complete(self, *, system_prompt: str, user_prompt: str) -> str:
|
|
14
|
+
raise RuntimeError(
|
|
15
|
+
"Cloud fallback is not enabled in this SDK build yet. "
|
|
16
|
+
"Use mode='local' with Ollama for Ministral."
|
|
17
|
+
)
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
from urllib import error, request
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
DEFAULT_OLLAMA_HOST = "http://127.0.0.1:11434"
DEFAULT_MINISTRAL_MODEL = "mistral/ministral-8b-instruct"
# Names under which a local Ministral 8B install may appear in `ollama list`.
MINISTRAL_MODEL_ALIASES = (
    DEFAULT_MINISTRAL_MODEL,
    "ministral",
    "ministral-8b",
    "ministral-8b-instruct",
)


class OllamaBackend:
    """LLM backend that talks to a local Ollama server over its HTTP API.

    The requested model name is resolved against the models actually
    installed locally: exact case-insensitive match first, then the known
    Ministral aliases, then any installed model whose name contains both
    "ministral" and "8b". Failures surface as RuntimeError with an
    actionable hint (including the exact ``ollama pull`` command).
    """

    backend_name = "ollama"

    def __init__(
        self,
        *,
        model_name: str = DEFAULT_MINISTRAL_MODEL,
        base_url: str | None = None,
        timeout_seconds: float = 60.0,
    ) -> None:
        self.model_name = model_name
        # Precedence: explicit argument, then OLLAMA_HOST env var, then default.
        self.base_url = (base_url or os.getenv("OLLAMA_HOST") or DEFAULT_OLLAMA_HOST).rstrip("/")
        self.timeout_seconds = timeout_seconds
        # Set by ensure_model_ready(); None until a model has been resolved.
        self.resolved_model_name: str | None = None

    def _pull_hint(self) -> str:
        """Shell command that installs the requested model locally."""
        return f"ollama pull {self.model_name}"

    def _request_json(
        self,
        path: str,
        payload: dict[str, object] | None = None,
        *,
        method: str = "POST",
    ) -> dict[str, object]:
        """Send a JSON request to the Ollama server and return the decoded dict.

        Raises:
            RuntimeError: when the server is unreachable, replies with an
                HTTP error, returns invalid JSON, or returns a non-dict body.
        """
        url = f"{self.base_url}{path}"
        headers = {"Accept": "application/json"}
        body = None
        if payload is not None:
            body = json.dumps(payload).encode("utf-8")
            headers["Content-Type"] = "application/json"
        req = request.Request(url, data=body, headers=headers, method=method)
        try:
            with request.urlopen(req, timeout=self.timeout_seconds) as response:
                text = response.read().decode("utf-8")
        except error.HTTPError as exc:
            detail = exc.read().decode("utf-8", errors="replace").strip()
            # Ollama answers 404 on /api/generate when the model is not pulled.
            if exc.code == 404 and path == "/api/generate":
                raise RuntimeError(
                    f"Ollama model '{self.model_name}' is not available locally. "
                    f"Run: ollama pull {self.model_name}"
                ) from exc
            raise RuntimeError(f"Ollama request failed ({exc.code}): {detail or exc.reason}") from exc
        except error.URLError as exc:
            raise RuntimeError(
                f"Could not reach Ollama at {self.base_url}. "
                "Start Ollama or set OLLAMA_HOST to the correct endpoint."
            ) from exc

        try:
            payload_json = json.loads(text)
        except json.JSONDecodeError as exc:
            raise RuntimeError("Ollama returned invalid JSON.") from exc
        if not isinstance(payload_json, dict):
            raise RuntimeError("Ollama returned an unexpected response shape.")
        return payload_json

    def available(self) -> bool:
        """Best-effort probe: True when the server answers /api/tags."""
        try:
            self._request_json("/api/tags", method="GET")
        except Exception:
            return False
        return True

    def list_models(self) -> list[str]:
        """Return the (stripped) names of all models installed on the server."""
        response = self._request_json("/api/tags", method="GET")
        raw_models = response.get("models", [])
        if not isinstance(raw_models, list):
            raise RuntimeError("Ollama returned an unexpected model list.")

        discovered: list[str] = []
        for entry in raw_models:
            if not isinstance(entry, dict):
                continue
            name = entry.get("name")
            if isinstance(name, str) and name.strip():
                discovered.append(name.strip())
        return discovered

    def _match_installed(self, installed: list[str]) -> str | None:
        """Resolve the requested model against an already-fetched model list.

        Kept separate from resolve_model_name() so callers that already hold
        the installed-model list (healthcheck, ensure_model_ready) do not
        trigger a second /api/tags round trip.
        """
        installed_lookup = {name.lower(): name for name in installed}
        # Normalize once; the original applied strip() only on the alias path,
        # which made "  ministral  " behave differently between branches.
        requested = self.model_name.strip().lower()

        exact = installed_lookup.get(requested)
        if exact is not None:
            return exact

        if requested in {"ministral", "ministral-8b", "ministral-8b-instruct"}:
            for alias in MINISTRAL_MODEL_ALIASES:
                resolved = installed_lookup.get(alias.lower())
                if resolved is not None:
                    return resolved

        # Fuzzy fallback: any installed tag that looks like a Ministral 8B build.
        for installed_name in installed:
            lowered = installed_name.lower()
            if "ministral" in lowered and "8b" in lowered:
                return installed_name

        return None

    def resolve_model_name(self) -> str | None:
        """Fetch installed models and resolve the requested name, or None."""
        return self._match_installed(self.list_models())

    def ensure_model_ready(self) -> str:
        """Resolve and cache the concrete model name, or raise with a fix hint.

        Raises:
            RuntimeError: when the server cannot be inspected or the requested
                model is not installed (message includes the pull command).
        """
        try:
            installed = self.list_models()
        except RuntimeError:
            raise
        except Exception as exc:
            raise RuntimeError(f"Failed to inspect local Ollama models: {exc}") from exc

        resolved = self._match_installed(installed)
        if resolved is None:
            self.resolved_model_name = None
            # Reuse the list already fetched instead of a second round trip.
            installed_hint = ", ".join(installed) if installed else "none"
            raise RuntimeError(
                "Ollama is running, but the requested Ministral model is not installed locally.\n"
                f"Requested: {self.model_name}\n"
                f"Installed: {installed_hint}\n"
                f"Run: {self._pull_hint()}"
            )
        self.resolved_model_name = resolved
        return resolved

    def healthcheck(self) -> dict[str, object]:
        """One-shot status report: server reachability, models, readiness."""
        installed = self.list_models()
        # Resolve from the list we already have: one HTTP call instead of two.
        resolved = self._match_installed(installed) if installed else None
        return {
            "available": True,
            "base_url": self.base_url,
            "requested_model": self.model_name,
            "resolved_model": resolved,
            "installed_models": installed,
            "ready": resolved is not None,
            "pull_command": None if resolved is not None else self._pull_hint(),
            "timeout_seconds": self.timeout_seconds,
        }

    def complete(self, *, system_prompt: str, user_prompt: str) -> str:
        """Run a non-streaming completion and return the stripped response text."""
        resolved_model = self.ensure_model_ready()
        payload = {
            "model": resolved_model,
            "system": system_prompt,
            "prompt": user_prompt,
            "stream": False,
        }
        response = self._request_json("/api/generate", payload)
        text = response.get("response")
        if not isinstance(text, str) or not text.strip():
            raise RuntimeError("Ollama returned an empty completion.")
        return text.strip()
|