contentintelpy 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -1,17 +1,8 @@
- import warnings
- import os
- import sys
-
- # Suppress HuggingFace/Transformers deprecation warnings globally
- warnings.filterwarnings("ignore", category=FutureWarning)
- warnings.filterwarnings("ignore", message=".*TRANSFORMERS_CACHE.*")
- os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
- os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
-
- import importlib
  from .pipeline.pipeline import Pipeline
  from .pipeline.context import PipelineContext
  from .pipeline.base_node import Node
+ import importlib
+ import sys

  def _show_welcome_hint():
  """Shows a concise setup hint if core optional dependencies are missing."""
@@ -43,6 +34,7 @@ from .services.sentiment_service import SentimentService
  from .services.translation_service import TranslationService
  from .services.ner_service import NERService
  from .services.summarization_service import SummarizationService
+ from .utils.logging import suppress_all_logs

  def create_default_pipeline() -> Pipeline:
  """
@@ -82,5 +74,6 @@ __all__ = [
  "SentimentService",
  "TranslationService",
  "NERService",
- "SummarizationService"
+ "SummarizationService",
+ "suppress_all_logs"
  ]
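
Taken together, the __init__.py changes move warning suppression out of import time and into an explicitly exported suppress_all_logs helper. A minimal usage sketch, assuming only the names exported in this diff (the sample text is illustrative; the result keys come from the nodes shown elsewhere in this diff):

    from contentintelpy import create_default_pipeline, suppress_all_logs

    suppress_all_logs()                    # opt in to silencing HF/Torch log noise
    pipeline = create_default_pipeline()   # default node chain from __init__.py
    result = pipeline.run({"text": "Your article text here..."})
    print(result.get("language"), result.get("entities"))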
@@ -1,9 +1,6 @@
  from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
- import logging
-
- logger = logging.getLogger("contentintelpy.nodes.classification")

  class CategoryClassificationNode(Node):
  """
@@ -24,7 +21,6 @@ class CategoryClassificationNode(Node):
  text = context.get("text_translated") or context.get("text")

  if not text or not isinstance(text, str):
- logger.warning("No text available for category classification.")
  return context

  try:
@@ -40,10 +36,8 @@ class CategoryClassificationNode(Node):
  context["category"] = top_label
  context["category_score"] = top_score
  context["all_categories"] = dict(zip(result['labels'], result['scores']))
- logger.debug(f"Classified as: {top_label} ({top_score:.2f})")

  except Exception as e:
- logger.error(f"Classification failed: {e}")
  context.add_error("CategoryClassificationNode", str(e))

  return context
@@ -3,11 +3,8 @@ from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
  from ..utils.lazy_import import ensure_dependency
  import numpy as np
- import logging
  import itertools

- logger = logging.getLogger("contentintelpy.nodes.keywords")
-
  class KeywordExtractionNode(Node):
  """
  Extracts keywords using semantic embeddings (KeyBERT-style logic).
@@ -69,13 +66,8 @@ class KeywordExtractionNode(Node):
  })

  context["keywords"] = keywords
- logger.debug(f"Extracted {len(keywords)} keywords.")

  except Exception as e:
- # Fallback? Maybe just log error.
- # Ideally could fallback to rake-nltk but we banned it.
- # So we just fail softly.
- logger.error(f"Keyword extraction failed: {e}")
  context.add_error("KeywordExtractionNode", str(e))

  return context
@@ -1,9 +1,6 @@
  from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
- import logging
-
- logger = logging.getLogger("contentintelpy.nodes.language")

  class LanguageDetectionNode(Node):
  """
@@ -16,7 +13,6 @@ class LanguageDetectionNode(Node):
  def process(self, context: PipelineContext) -> PipelineContext:
  text = context.get("text")
  if not text or not isinstance(text, str):
- logger.warning("No text found in context for LanguageDetectionNode.")
  context["language"] = "en" # Default to English if no text
  context["language_score"] = 0.0
  return context
@@ -36,14 +32,12 @@ class LanguageDetectionNode(Node):

  context["language"] = lang_code
  context["language_score"] = score
- logger.info(f"Detected language: {lang_code} ({score:.2f})")
  else:
  context["language"] = "unknown"
  context["language_score"] = 0.0

  except Exception as e:
  # Fallback if model fails
- logger.error(f"Language detection model error: {e}")
  context["language"] = "en" # Fallback safe default
  context["language_score"] = 0.0
  raise e # Re-raise to trigger BaseNode error logging
@@ -1,8 +1,5 @@
  from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
- import logging
-
- logger = logging.getLogger("contentintelpy.nodes.location")

  class LocationExtractionNode(Node):
  """
@@ -35,13 +32,10 @@ class LocationExtractionNode(Node):
  locations.append({
  "name": text,
  "type": label,
- # Placeholder for future geocoding extension:
- # "coordinates": None
  })
  seen.add(clean_text)

  if locations:
  context["locations"] = locations
- logger.debug(f"Extracted {len(locations)} unique locations.")

  return context
@@ -1,14 +1,11 @@
  from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
- import logging
-
- logger = logging.getLogger("contentintelpy.nodes.ner")

  class NERNode(Node):
  """
  Named Entity Recognition using GLiNER.
- Extracts: Person, Organization, Location, Date, etc.
+ Extracts: Person, Organization, Location, City, Country, Date etc.
  """
  LABELS = ["Person", "Organization", "Location", "City", "Country", "Date"]

@@ -20,6 +17,7 @@ class NERNode(Node):
  text = context.get("text_translated") or context.get("text")

  if not text:
+ context.add_error("NERNode", "No input text provided for entity extraction.")
  return context

  try:
@@ -37,10 +35,8 @@ class NERNode(Node):
  })

  context["entities"] = serialized_entities
- logger.debug(f"Found {len(serialized_entities)} entities.")

  except Exception as e:
- logger.error(f"NER failed: {e}")
  context.add_error("NERNode", str(e))

  return context
@@ -1,9 +1,6 @@
  from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
- import logging
-
- logger = logging.getLogger("contentintelpy.nodes.sentiment")

  class SentimentNode(Node):
  """
@@ -12,7 +9,6 @@ class SentimentNode(Node):
  """

  # Simple static mapping for target language labels (MVP approach)
- # In a full version, this could be dynamic or more extensive.
  LABEL_MAP = {
  "hi": {
  "positive": "सकारात्मक",
@@ -29,21 +25,23 @@ class SentimentNode(Node):
  "negative": "négatif",
  "neutral": "neutre"
  },
- # Add more as needed
  }

  def __init__(self):
  super().__init__("SentimentNode")

  def process(self, context: PipelineContext) -> PipelineContext:
- # Sentiment should run on the TRANSLATED text (English) for model accuracy
- # But we return labels relevant to the original language context if possible.
  text_to_analyze = context.get("text_translated") or context.get("text")
  original_lang = context.get("language", "en")

  if not text_to_analyze:
+ context.add_error("SentimentNode", "No input text provided for analysis.")
  return context

+ if len(text_to_analyze) > 512:
+ import warnings
+ warnings.warn("Input text exceeds 512 characters and will be truncated for sentiment analysis.", UserWarning)
+
  try:
  analyzer = registry.get_sentiment_pipeline()
  # Truncate to model max length (~512 tokens)
@@ -71,10 +69,8 @@ class SentimentNode(Node):
  "value_en": label_en,
  "confidence": round(float(score), 4)
  }
- logger.debug(f"Sentiment: {label_en} ({score:.2f})")

  except Exception as e:
- logger.error(f"Sentiment analysis failed: {e}")
  context.add_error("SentimentNode", str(e))

  return context
@@ -1,12 +1,8 @@
  from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
- import logging
-
  from ..utils.lazy_import import ensure_dependency

- logger = logging.getLogger("contentintelpy.nodes.summarization")
-
  class SummarizationNode(Node):
  """
  Summarizes text.
@@ -22,7 +18,10 @@ class SummarizationNode(Node):
  text = context.get("text_translated") or context.get("text")

  if not text or len(text.split()) < 30:
- logger.debug("Text too short for summarization.")
+ import warnings
+ msg = "Text too short for summarization (minimum 30 words recommended)."
+ warnings.warn(msg, UserWarning)
+ context.add_error("SummarizationNode", msg)
  return context

  summary_text = None
@@ -37,7 +36,6 @@ class SummarizationNode(Node):
  if result and len(result) > 0:
  summary_text = result[0]['summary_text']
  except Exception as e:
- logger.warning(f"BART summarization failed: {e}. Falling back to Sumy.")
  context.add_error("SummarizationNode_BART", str(e))

  # 2. Fallback: Sumy (LSA)
@@ -60,7 +58,6 @@ class SummarizationNode(Node):
  summary_text = " ".join([str(s) for s in sentences])
  context["summary_method"] = "sumy_lsa"
  except Exception as e:
- logger.error(f"Sumy fallback failed: {e}")
  context.add_error("SummarizationNode_Sumy", str(e))

  if summary_text:
@@ -2,9 +2,6 @@ from ..pipeline.base_node import Node
  from ..pipeline.context import PipelineContext
  from ..utils.model_registry import registry
  from ..utils.lazy_import import ensure_dependency
- import logging
-
- logger = logging.getLogger("contentintelpy.nodes.translation")

  class TranslationNode(Node):
  """
@@ -28,7 +25,6 @@ class TranslationNode(Node):
  return context

  if not self.force and source_lang == self.target_lang:
- logger.info(f"Skipping translation: Source is already {source_lang}.")
  context["text_translated"] = original_text
  context["translation_method"] = "skipped"
  return context
@@ -36,23 +32,12 @@ class TranslationNode(Node):
  # 2. Try NLLB (High Quality)
  translated_text = None
  try:
- logger.info("Attempting translation with NLLB...")
  translator = registry.get_translation_pipeline()
- # NLLB expects specific language codes (e.g., 'hin_Deva' for Hindi)
- # For simplicity in this v0.1, we assume the model handles standard ISO codes or auto-detect
- # In a real deep implementation, we'd map ISO->NLLB codes.
- # Transformers pipeline handles source detection often, but target needs spec.
- # However, NLLB pipeline usage is slightly complex.
- # For v0.1.0 reliability, we wrap this in try-except carefully.
-
- # Simple direct usage for standard pipeline
  output = translator(original_text, src_lang=source_lang, tgt_lang=self.target_lang, max_length=512)
- # Output format: [{'translation_text': '...'}]
  if output and len(output) > 0:
  translated_text = output[0]['translation_text']
  context["translation_method"] = "nllb"
  except Exception as e:
- logger.warning(f"NLLB translation failed: {e}. Falling back to Argos.")
  context.add_error("TranslationNode_NLLB", str(e))

  # 3. Fallback: ArgosTranslate (Offline)
@@ -62,10 +47,7 @@ class TranslationNode(Node):
  import argostranslate.package
  import argostranslate.translate

- logger.info("Attempting translation with ArgosTranslate...")
  # Argos requires ensuring packages are installed
- # This is a blocking network call on first run if not present
- # Ideally, we should preload these in ModelRegistry, but Argos manages its own state
  argostranslate.package.update_package_index()
  available_packages = argostranslate.package.get_available_packages()
  package_to_install = next(
@@ -79,7 +61,6 @@ class TranslationNode(Node):
  translated_text = argostranslate.translate.translate(original_text, source_lang, self.target_lang)
  context["translation_method"] = "argos"
  except Exception as e:
- logger.error(f"Argos translation failed: {e}")
  context.add_error("TranslationNode_Argos", str(e))

  # 4. Final Result
@@ -87,7 +68,6 @@ class TranslationNode(Node):
  context["text_translated"] = translated_text
  else:
  # Last resort: Keep original
- logger.warning("All translation methods failed. Keeping original text.")
  context["text_translated"] = original_text
  context["translation_method"] = "failed_copy"

@@ -10,21 +10,48 @@ class Pipeline:
  def __init__(self, nodes: List[Node]):
  self.nodes = nodes

- def run(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
+ def run(self, input_data: Dict[str, Any], verbose: bool = False) -> Dict[str, Any]:
  """
  Execute the pipeline on the input data.

  Args:
  input_data: Dictionary containing the input (e.g. {"text": "..."})
+ verbose: If True, prints a professional progress indicator to console.

  Returns:
- The final dictionary with results and any errors (sparse output).
+ The final dictionary with results and any errors.
  """
  # Initialize context wrapper
  context = PipelineContext(input_data)

+ if verbose:
+ print(f"\n🚀 [ContentIntelPy] Starting Pipeline ({len(self.nodes)} stages)")
+
  # distinct linear execution
- for node in self.nodes:
+ for i, node in enumerate(self.nodes, 1):
+ if verbose:
+ # Pretty print stage
+ # Example: [1/8] 🔍 LanguageDetectionNode ...
+ icon = "⚙️"
+ if "Language" in node.name: icon = "🌍"
+ elif "Translation" in node.name: icon = "🔤"
+ elif "NER" in node.name: icon = "👤"
+ elif "Sentiment" in node.name: icon = "🎭"
+ elif "Summarization" in node.name: icon = "📝"
+ elif "Keyword" in node.name: icon = "🔑"
+ elif "Classification" in node.name: icon = "📊"
+
+ print(f" [{i}/{len(self.nodes)}] {icon} {node.name:25}", end="... ", flush=True)
+
  context = node.run(context)

+ if verbose:
+ if context.get("errors", {}).get(node.name):
+ print("❌")
+ else:
+ print("✅")
+
+ if verbose:
+ print(f"✨ Pipeline Complete!\n")
+
  return context.to_dict()
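
The run() hunk above adds an opt-in verbose mode that prints per-stage progress. A sketch of a call with the new flag, assuming the default pipeline from __init__.py; the sample text is made up, and the console shape is inferred from the print statements in this hunk (stage count will vary):

    from contentintelpy import create_default_pipeline

    pipeline = create_default_pipeline()
    result = pipeline.run({"text": "Berlin is the capital of Germany."}, verbose=True)
    # Roughly, per the prints above:
    #   🚀 [ContentIntelPy] Starting Pipeline (8 stages)
    #    [1/8] 🌍 LanguageDetectionNode        ... ✅
    #    ...
    #   ✨ Pipeline Complete!
    # result is the plain dict produced by context.to_dict()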
@@ -0,0 +1,46 @@
+ import os
+ import warnings
+ import logging
+ import sys
+
+ def suppress_all_logs():
+ """
+ Professional log cleaner for production and demos.
+ Suppresses all non-critical logs from Transformers, Torch, and HuggingFace.
+ """
+ # --- Python warnings ---
+ warnings.filterwarnings("ignore")
+ warnings.filterwarnings("ignore", category=UserWarning)
+ warnings.filterwarnings("ignore", category=FutureWarning)
+
+ # --- Environment Variables ---
+ # HuggingFace & Transformers
+ os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1"
+ os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
+ os.environ["TRANSFORMERS_VERBOSITY"] = "error"
+ os.environ["HF_HUB_VERBOSITY"] = "error"
+
+ # PyTorch
+ os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR"
+ os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
+
+ # --- Logging levels ---
+ logging.getLogger().setLevel(logging.ERROR)
+
+ # Specific noisy libraries
+ noisy_libs = [
+ "transformers",
+ "torch",
+ "huggingface_hub",
+ "sentence_transformers",
+ "gliner",
+ "stanza",
+ "argostranslate"
+ ]
+ for lib in noisy_libs:
+ logging.getLogger(lib).setLevel(logging.ERROR)
+
+ # Special case: Silence stdout/stderr for specific noisy blocks if needed
+ # But for a general utility, environment variables are better.
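
The closing comment in the new logging utility raises the option of silencing stdout/stderr around specific noisy blocks rather than process-wide. A hedged standard-library sketch of that idea, not part of the package (noisy_model_load is a hypothetical placeholder):

    import contextlib
    import os

    # Temporarily route stdout/stderr to the null device around one noisy call.
    with open(os.devnull, "w") as devnull, \
         contextlib.redirect_stdout(devnull), \
         contextlib.redirect_stderr(devnull):
        noisy_model_load()  # hypothetical noisy call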
@@ -1,5 +1,4 @@
  from .lazy_import import ensure_dependency
- import logging
  import threading
  from typing import Any, Optional
  import warnings
@@ -8,8 +7,6 @@ import warnings
  warnings.filterwarnings("ignore", category=UserWarning)
  warnings.filterwarnings("ignore", category=FutureWarning)

- logger = logging.getLogger("contentintelpy.registry")
-
  class ModelRegistry:
  """
  Centralized registry for ML models.
@@ -39,11 +36,8 @@ class ModelRegistry:
  # Double-check inside lock
  if key not in self._models:
  try:
- logger.info(f"Loading resource '{key}'... (This may take a moment)")
  self._models[key] = loader_func()
- logger.info(f"Successfully loaded '{key}'.")
  except Exception as e:
- logger.error(f"Failed to load '{key}': {e}")
  raise e
  return self._models[key]

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: contentintelpy
- Version: 0.1.6
+ Version: 0.1.7
  Summary: Production-grade NLP library for unified content intelligence.
  Author-email: Ronit Fulari <ronitfulari31@gmail.com>
  License: MIT
@@ -0,0 +1,24 @@
+ contentintelpy/__init__.py,sha256=3KZp_7SPuw657SfrQbxVoF6ePoulfsNMTPDbyDlLpRg,2749
+ contentintelpy/nodes/classification_node.py,sha256=rJCUnylkWY_4gOCZW9heGFArLdcILiwlV9U5rY4YNSE,1817
+ contentintelpy/nodes/keyword_extract_node.py,sha256=9J-qgwpJuaprvTH1asSAE9oTHwZHCVBk4eB-i8NkUQg,2662
+ contentintelpy/nodes/language_node.py,sha256=P2IPBRzRZ4KfaJLgNp2YiFpoqtgrm_9okIeirSRPcU0,1665
+ contentintelpy/nodes/location_node.py,sha256=6numI-xzcUwwsBXP-ecvwT4Fwr5GO_p2n7tTSO55G6Q,1306
+ contentintelpy/nodes/ner_node.py,sha256=V8uwA7EAyc4R8FAxgq0CMIeAi_I2WlzjmP2s1KpVgpM,1544
+ contentintelpy/nodes/sentiment_node.py,sha256=31PIyQUSFOwvEi0n2UWeNW-YDlbr0wH_hRghuVkQ_Ik,2807
+ contentintelpy/nodes/summarization_node.py,sha256=OdY6RvrDkEc1flMTbYQlAKZ_vHOwfweoKxCS1944NJM,2725
+ contentintelpy/nodes/translation_node.py,sha256=4tvP8KyLlPikxz0Tj1omFx56FQq9NiZ2lCm2MQa-eXo,3161
+ contentintelpy/pipeline/base_node.py,sha256=hYLx2yAURpbmTr9x4kG8qVIlNI1Q0UJckBltW5LJl-o,1394
+ contentintelpy/pipeline/context.py,sha256=fAtlqrrVSnGVx1DqJq15WT9nLE-MsuKP81xlqxu1cAY,1424
+ contentintelpy/pipeline/pipeline.py,sha256=HrY_wUuwP6EHQrPbv0Swc9jrHcDC-ITPdEVBu4RIsnc,2076
+ contentintelpy/services/ner_service.py,sha256=7-sEAqxYRpVksd-sZ5CPgAq3HfVeeb0OaRd0YPIqzPs,737
+ contentintelpy/services/sentiment_service.py,sha256=Yc6u0l8m_uN5ZxgUMr9DQziwi50cMlTZuaAOS8A7pJc,1130
+ contentintelpy/services/summarization_service.py,sha256=XK3vAGGoQS1dXxaO4nKjyrFlWwN_wZKY2qFNcDJ9IIM,748
+ contentintelpy/services/translation_service.py,sha256=6yNLLJ7mAE7ptHvprX1JUoUN-65Ot7ZdTszqqxMY1TA,1191
+ contentintelpy/utils/lazy_import.py,sha256=izXzPtoTUfkd6ee_7QFqE-IARd5NBe2_W6-pCbS2e2o,956
+ contentintelpy/utils/logging.py,sha256=MfX8-0FTaNmUgME2QU4-5WjMcqwln6KsmPVS-uJ6JqU,1492
+ contentintelpy/utils/model_registry.py,sha256=HHpfIqH5LT6xPhtxDBGqg3gEGy7qWoXjYhOPndyWb2w,5575
+ contentintelpy-0.1.7.dist-info/licenses/LICENSE,sha256=lZ8hT4isGfdFVxdD7gDRnt3RJqyrkO1L5GseyN3A9hM,1092
+ contentintelpy-0.1.7.dist-info/METADATA,sha256=Lsx9eeTA79SX4XhdPArPY_ZTVGNzQ9Gn7yish_CGJgw,5085
+ contentintelpy-0.1.7.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ contentintelpy-0.1.7.dist-info/top_level.txt,sha256=sxoE-r2-frUi3qwADEiYcFFxZW5hMI1Mjw87hcGMulQ,15
+ contentintelpy-0.1.7.dist-info/RECORD,,
@@ -1,23 +0,0 @@
- contentintelpy/__init__.py,sha256=j_4VSIgI5_zMyvl_Bi-TKP6C8I6mzWVFrbn-dkhgk3Y,3012
- contentintelpy/nodes/classification_node.py,sha256=ivfAHdYXZU-5eVbtgxD94_TiRHcq-mJg4ukOc7KqwXU,2116
- contentintelpy/nodes/keyword_extract_node.py,sha256=o-OTOdkdEIYnd_ZczdAjwJkWnjzlKLa28WnEZCKy_og,3024
- contentintelpy/nodes/language_node.py,sha256=sKRa65kLrb1IRYGrkT82tu8LgdhIXdN5EwhUrH6pSqI,1971
- contentintelpy/nodes/location_node.py,sha256=U3YQ31KclWNeoyrorodBAzAEd7zLmI31Deu72Viw1M0,1579
- contentintelpy/nodes/ner_node.py,sha256=8DRg7NVpz8ZXcobgwYZsWkNOvaFfIj_ZEWG8wJckqus,1632
- contentintelpy/nodes/sentiment_node.py,sha256=LzKCjhF1Q0iI4J9vC_8WPGzeTWIu5p_1lDgoFUox8oo,3000
- contentintelpy/nodes/summarization_node.py,sha256=XHdT8lMN-1_fiGBGkqbblPfJgkvQTf7XIq99z7w_6Bw,2803
- contentintelpy/nodes/translation_node.py,sha256=zebwFqp22bo4Oi5gKyNGZKduAl_grfZTC53PKPsKVko,4511
- contentintelpy/pipeline/base_node.py,sha256=hYLx2yAURpbmTr9x4kG8qVIlNI1Q0UJckBltW5LJl-o,1394
- contentintelpy/pipeline/context.py,sha256=fAtlqrrVSnGVx1DqJq15WT9nLE-MsuKP81xlqxu1cAY,1424
- contentintelpy/pipeline/pipeline.py,sha256=gTgRcF34KxAJMxtac7wHdesD33q3CIP9hncvILHQ-3c,888
- contentintelpy/services/ner_service.py,sha256=7-sEAqxYRpVksd-sZ5CPgAq3HfVeeb0OaRd0YPIqzPs,737
- contentintelpy/services/sentiment_service.py,sha256=Yc6u0l8m_uN5ZxgUMr9DQziwi50cMlTZuaAOS8A7pJc,1130
- contentintelpy/services/summarization_service.py,sha256=XK3vAGGoQS1dXxaO4nKjyrFlWwN_wZKY2qFNcDJ9IIM,748
- contentintelpy/services/translation_service.py,sha256=6yNLLJ7mAE7ptHvprX1JUoUN-65Ot7ZdTszqqxMY1TA,1191
- contentintelpy/utils/lazy_import.py,sha256=izXzPtoTUfkd6ee_7QFqE-IARd5NBe2_W6-pCbS2e2o,956
- contentintelpy/utils/model_registry.py,sha256=Zk52vJy4wtN2zcYnqOtWJTgbAGMQUYj3LFqHJBiILXY,5882
- contentintelpy-0.1.6.dist-info/licenses/LICENSE,sha256=lZ8hT4isGfdFVxdD7gDRnt3RJqyrkO1L5GseyN3A9hM,1092
- contentintelpy-0.1.6.dist-info/METADATA,sha256=bEW5BVMGbrqLoKrcFDqQKf5AsKlyqG6NjABcpK31ecw,5085
- contentintelpy-0.1.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- contentintelpy-0.1.6.dist-info/top_level.txt,sha256=sxoE-r2-frUi3qwADEiYcFFxZW5hMI1Mjw87hcGMulQ,15
- contentintelpy-0.1.6.dist-info/RECORD,,