eval-ai-library 0.3.1__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of eval-ai-library might be problematic.

Files changed (41)
  1. {eval_ai_library-0.3.1/eval_ai_library.egg-info → eval_ai_library-0.3.2}/PKG-INFO +1 -1
  2. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2/eval_ai_library.egg-info}/PKG-INFO +1 -1
  3. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/__init__.py +8 -6
  4. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/pyproject.toml +1 -1
  5. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/LICENSE +0 -0
  6. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/MANIFEST.in +0 -0
  7. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/README.md +0 -0
  8. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_ai_library.egg-info/SOURCES.txt +0 -0
  9. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_ai_library.egg-info/dependency_links.txt +0 -0
  10. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_ai_library.egg-info/requires.txt +0 -0
  11. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_ai_library.egg-info/top_level.txt +0 -0
  12. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/agent_metrics/__init__.py +0 -0
  13. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/agent_metrics/knowledge_retention_metric/knowledge_retention.py +0 -0
  14. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/agent_metrics/role_adherence_metric/role_adherence.py +0 -0
  15. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/agent_metrics/task_success_metric/task_success_rate.py +0 -0
  16. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/agent_metrics/tools_correctness_metric/tool_correctness.py +0 -0
  17. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/datagenerator/datagenerator.py +0 -0
  18. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/datagenerator/document_loader.py +0 -0
  19. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/datagenerator/prompts.py +0 -0
  20. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/evaluate.py +0 -0
  21. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/evaluation_schema.py +0 -0
  22. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/llm_client.py +0 -0
  23. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metric_pattern.py +0 -0
  24. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/__init__.py +0 -0
  25. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/answer_precision_metric/answer_precision.py +0 -0
  26. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/answer_relevancy_metric/answer_relevancy.py +0 -0
  27. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/bias_metric/bias.py +0 -0
  28. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/contextual_precision_metric/contextual_precision.py +0 -0
  29. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/contextual_recall_metric/contextual_recall.py +0 -0
  30. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/contextual_relevancy_metric/contextual_relevancy.py +0 -0
  31. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/custom_metric/custom_eval.py +0 -0
  32. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/faithfulness_metric/faithfulness.py +0 -0
  33. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/geval/geval.py +0 -0
  34. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/restricted_refusal_metric/restricted_refusal.py +0 -0
  35. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/metrics/toxicity_metric/toxicity.py +0 -0
  36. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/price.py +0 -0
  37. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/py.typed +0 -0
  38. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/testcases_schema.py +0 -0
  39. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/utils.py +0 -0
  40. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/setup.cfg +0 -0
  41. {eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/setup.py +0 -0

{eval_ai_library-0.3.1/eval_ai_library.egg-info → eval_ai_library-0.3.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eval-ai-library
- Version: 0.3.1
+ Version: 0.3.2
  Summary: Comprehensive AI Model Evaluation Framework with support for multiple LLM providers
  Author-email: Aleksandr Meshkov <alekslynx90@gmail.com>
  License: MIT

{eval_ai_library-0.3.1 → eval_ai_library-0.3.2/eval_ai_library.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eval-ai-library
- Version: 0.3.1
+ Version: 0.3.2
  Summary: Comprehensive AI Model Evaluation Framework with support for multiple LLM providers
  Author-email: Aleksandr Meshkov <alekslynx90@gmail.com>
  License: MIT

{eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/eval_lib/__init__.py
@@ -7,7 +7,7 @@ A powerful library for evaluating AI models with support for multiple LLM providers
  and a wide range of evaluation metrics for RAG systems and AI agents.
  """

- __version__ = "0.3.1"
+ __version__ = "0.3.2"
  __author__ = "Aleksandr Meshkov"

  # Core evaluation functions
@@ -68,12 +68,14 @@ from eval_lib.agent_metrics import (

  def __getattr__(name):
      """
-     Lazy import for modules with heavy dependencies.
-     DataGenerator is imported only when it is actually used.
+     Lazy loading for data generation components.
      """
-     if name == "DataGenerator":
-         from eval_lib.datagenerator.datagenerator import DataGenerator
-         return DataGenerator
+     if name == "DatasetGenerator":
+         from eval_lib.datagenerator.datagenerator import DatasetGenerator
+         return DatasetGenerator
+     if name == "DataGenerator":  # Alias for DatasetGenerator
+         from eval_lib.datagenerator.datagenerator import DatasetGenerator
+         return DatasetGenerator
      if name == "DocumentLoader":
          from eval_lib.datagenerator.document_loader import DocumentLoader
          return DocumentLoader
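
The lazy-import change above relies on the module-level __getattr__ hook (PEP 562): importing eval_lib stays cheap, and the datagenerator module is only loaded the first time one of these attributes is accessed. A minimal caller-side sketch of the behavior implied by the hunk above; the variable names and the assert are illustrative, and the fallback for attributes not handled by any branch is not shown in this diff:

    import eval_lib

    # Nothing from eval_lib.datagenerator has been imported yet. Accessing the
    # attribute triggers eval_lib.__getattr__("DatasetGenerator"), which imports
    # the module and returns the class.
    DatasetGenerator = eval_lib.DatasetGenerator

    # The older spelling still resolves via the alias branch in the hunk above
    # and yields the same class object.
    DataGenerator = eval_lib.DataGenerator
    assert DataGenerator is DatasetGenerator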

{eval_ai_library-0.3.1 → eval_ai_library-0.3.2}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "eval-ai-library"
- version = "0.3.1"
+ version = "0.3.2"
  description = "Comprehensive AI Model Evaluation Framework with support for multiple LLM providers"
  readme = "README.md"
  authors = [