eval-ai-library 0.1.0__tar.gz → 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of eval-ai-library has been flagged as potentially problematic; consult the registry's advisory page for more details.

Files changed (41)
  1. {eval_ai_library-0.1.0/eval_ai_library.egg-info → eval_ai_library-0.2.1}/PKG-INFO +1 -1
  2. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1/eval_ai_library.egg-info}/PKG-INFO +1 -1
  3. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/__init__.py +1 -1
  4. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/pyproject.toml +1 -1
  5. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/LICENSE +0 -0
  6. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/MANIFEST.in +0 -0
  7. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/README.md +0 -0
  8. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_ai_library.egg-info/SOURCES.txt +0 -0
  9. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_ai_library.egg-info/dependency_links.txt +0 -0
  10. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_ai_library.egg-info/requires.txt +0 -0
  11. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_ai_library.egg-info/top_level.txt +0 -0
  12. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/agent_metrics/__init__.py +0 -0
  13. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/agent_metrics/knowledge_retention_metric/knowledge_retention.py +0 -0
  14. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/agent_metrics/role_adherence_metric/role_adherence.py +0 -0
  15. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/agent_metrics/task_success_metric/task_success_rate.py +0 -0
  16. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/agent_metrics/tools_correctness_metric/tool_correctness.py +0 -0
  17. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/datagenerator/datagenerator.py +0 -0
  18. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/datagenerator/document_loader.py +0 -0
  19. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/datagenerator/prompts.py +0 -0
  20. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/evaluate.py +0 -0
  21. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/evaluation_schema.py +0 -0
  22. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/llm_client.py +0 -0
  23. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metric_pattern.py +0 -0
  24. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/__init__.py +0 -0
  25. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/answer_precision_metric/answer_precision.py +0 -0
  26. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/answer_relevancy_metric/answer_relevancy.py +0 -0
  27. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/bias_metric/bias.py +0 -0
  28. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/contextual_precision_metric/contextual_precision.py +0 -0
  29. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/contextual_recall_metric/contextual_recall.py +0 -0
  30. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/contextual_relevancy_metric/contextual_relevancy.py +0 -0
  31. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/custom_metric/custom_eval.py +0 -0
  32. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/faithfulness_metric/faithfulness.py +0 -0
  33. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/geval/geval.py +0 -0
  34. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/restricted_refusal_metric/restricted_refusal.py +0 -0
  35. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/metrics/toxicity_metric/toxicity.py +0 -0
  36. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/price.py +0 -0
  37. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/py.typed +0 -0
  38. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/testcases_schema.py +0 -0
  39. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/eval_lib/utils.py +0 -0
  40. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/setup.cfg +0 -0
  41. {eval_ai_library-0.1.0 → eval_ai_library-0.2.1}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: eval-ai-library
3
- Version: 0.1.0
3
+ Version: 0.2.1
4
4
  Summary: Comprehensive AI Model Evaluation Framework with support for multiple LLM providers
5
5
  Author-email: Aleksandr Meshkov <alekslynx90@gmail.com>
6
6
  License: MIT
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: eval-ai-library
3
- Version: 0.1.0
3
+ Version: 0.2.1
4
4
  Summary: Comprehensive AI Model Evaluation Framework with support for multiple LLM providers
5
5
  Author-email: Aleksandr Meshkov <alekslynx90@gmail.com>
6
6
  License: MIT
@@ -10,7 +10,7 @@ __author__ = "Aleksandr Meskov"
10
10
 
11
11
  # Core evaluation functions
12
12
  from eval_lib.evaluate import evaluate, evaluate_conversations
13
- from eval_lib.utils import score_agg, softmax_agg
13
+ from eval_lib.utils import score_agg
14
14
 
15
15
  # Test case schemas
16
16
  from eval_lib.testcases_schema import (
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "eval-ai-library"
7
- version = "0.1.0"
7
+ version = "0.2.1"
8
8
  description = "Comprehensive AI Model Evaluation Framework with support for multiple LLM providers"
9
9
  readme = "README.md"
10
10
  authors = [
File without changes