renard-pipeline 0.4.2-py3-none-any.whl → 0.6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of renard-pipeline might be problematic.

renard/pipeline/tokenization.py CHANGED
@@ -1,49 +1,78 @@
-from typing import Dict, Any, List, Optional, Set, Union, Literal
-import itertools
-import torch
+from typing import Dict, Any, Set, Union, Literal, List, Tuple
+from more_itertools import windowed
 import nltk
-from transformers.tokenization_utils_base import BatchEncoding
-from more_itertools.recipes import flatten
+from nltk.data import load
+from nltk.tokenize.destructive import NLTKWordTokenizer
 from renard.pipeline.core import PipelineStep
-from renard.pipeline.progress import ProgressReporter
 from renard.nltk_utils import NLTK_ISO_STRING_TO_LANG
 
 
+def make_char2token(text: str, token2chars: List[Tuple[int, int]]) -> List[int]:
+    if len(token2chars) == 0:
+        return []
+
+    c2t = [None] * len(text)
+    for token_i, chars in enumerate(token2chars):
+        for char_i in range(*chars):
+            c2t[char_i] = token_i  # type: ignore
+
+    for char_i in range(0, token2chars[0][0]):
+        c2t[char_i] = 0  # type: ignore
+    for chars1, chars2 in windowed(token2chars, 2):
+        if chars1 is None or chars2 is None:
+            continue
+        end1 = chars1[1]
+        start2 = chars2[0]
+        for char_i in range(end1, start2):
+            c2t[char_i] = c2t[end1 - 1]
+    for char_i in range(token2chars[-1][1], len(c2t)):
+        c2t[char_i] = token2chars[-1][1]  # type: ignore
+
+    assert all([not i is None for i in c2t])
+    return c2t  # type: ignore
+
+
 class NLTKTokenizer(PipelineStep):
-    """Construct a nltk word tokenizer"""
+    """An NLTK-based tokenizer"""
 
     def __init__(self):
         nltk.download("punkt", quiet=True)
+        self.word_tokenizer = None
+        self.sent_tokenizer = None
         super().__init__()
 
-    def __call__(
-        self, text: str, chapters: Optional[List[str]] = None, **kwargs
-    ) -> Dict[str, Any]:
-        """
-        :param text:
-        """
-        if not chapters is None:
-            out_dicts = [self.__call__(chapter, None) for chapter in chapters]
-            return {
-                "tokens": list(itertools.chain(*[d["tokens"] for d in out_dicts])),
-                "sentences": list(
-                    itertools.chain(*[d["sentences"] for d in out_dicts])
-                ),
-                "chapter_tokens": [d["tokens"] for d in out_dicts],
-            }
-
-        sentences = nltk.sent_tokenize(
-            text, language=NLTK_ISO_STRING_TO_LANG[self.lang]
-        )
+    def _pipeline_init_(self, lang: str, **kwargs):
+        assert lang in NLTK_ISO_STRING_TO_LANG
+        nltk_lang = NLTK_ISO_STRING_TO_LANG[lang]
+        self.word_tokenizer = NLTKWordTokenizer()
+        self.sent_tokenizer = load(f"tokenizers/punkt/{nltk_lang}.pickle")
+        super()._pipeline_init_(lang, **kwargs)
+
+    def __call__(self, text: str, **kwargs) -> Dict[str, Any]:
+        assert not self.word_tokenizer is None
+        assert not self.sent_tokenizer is None
+
+        sent_indices = self.sent_tokenizer.span_tokenize(text)
 
         tokens = []
+        token2chars = []
         tokenized_sentences = []
-        for sent in sentences:
-            sent_tokens = nltk.word_tokenize(sent)
+        for sent_start, sent_end in sent_indices:
+            sent = text[sent_start:sent_end]
+            sent_tokens_indices = list(self.word_tokenizer.span_tokenize(sent))
+            token2chars += [
+                (start + sent_start, end + sent_start)
+                for start, end in sent_tokens_indices
+            ]
+            sent_tokens = [sent[start:end] for start, end in sent_tokens_indices]
             tokenized_sentences.append(sent_tokens)
             tokens += sent_tokens
 
-        return {"tokens": tokens, "sentences": tokenized_sentences}
+        return {
+            "tokens": tokens,
+            "char2token": make_char2token(text, token2chars),
+            "sentences": tokenized_sentences,
+        }
 
     def supported_langs(self) -> Union[Set[str], Literal["any"]]:
         return set(NLTK_ISO_STRING_TO_LANG.keys())
@@ -52,4 +81,4 @@ class NLTKTokenizer(PipelineStep):
         return {"text"}
 
     def production(self) -> Set[str]:
-        return {"tokens", "chapter_tokens", "sentences"}
+        return {"tokens", "char2token", "sentences"}
renard/plot_utils.py CHANGED
@@ -15,53 +15,73 @@ CharactersGraphLayout = Union[
 
 
 def layout_nx_graph_reasonably(G: nx.Graph) -> Dict[Any, np.ndarray]:
-    return nx.spring_layout(G, k=2 / math.sqrt(len(G.nodes)))  # type: ignore
+    return nx.spring_layout(G, k=min(1.5, 8 / math.sqrt(len(G.nodes))))  # type: ignore
 
 
-def plot_nx_graph_reasonably(G: nx.Graph, ax=None, layout: Optional[dict] = None):
+def plot_nx_graph_reasonably(
+    G: nx.Graph,
+    ax=None,
+    layout: Optional[dict] = None,
+    node_kwargs: Optional[Dict[str, Any]] = None,
+    edge_kwargs: Optional[Dict[str, Any]] = None,
+    label_kwargs: Optional[Dict[str, Any]] = None,
+    legend: bool = False,
+):
     """Try to plot a :class:`nx.Graph` with 'reasonable' parameters
 
     :param G: the graph to draw
     :param ax: matplotlib axes
     :param layout: if given, this graph layout will be applied.
         Otherwise, use :func:`layout_nx_graph_reasonably`.
+    :param node_kwargs: passed to :func:`nx.draw_networkx_nodes`
+    :param edge_kwargs: passed to :func:`nx.draw_networkx_edges`
+    :param label_kwargs: passed to :func:`nx.draw_networkx_labels`
+    :param legend: if ``True``, will try to plot an additional legend.
     """
     pos = layout
     if pos is None:
         pos = layout_nx_graph_reasonably(G)
 
-    nx.draw_networkx_nodes(
-        G,
-        pos,
-        node_color=[degree for _, degree in G.degree],  # type: ignore
-        cmap=plt.get_cmap("winter_r"),
-        node_size=[1 + degree * 10 for _, degree in G.degree],  # type: ignore
-        ax=ax,
+    node_kwargs = node_kwargs or {}
+    node_kwargs["node_color"] = node_kwargs.get(
+        "node_color", [degree for _, degree in G.degree]
     )
+    node_kwargs["cmap"] = node_kwargs.get("cmap", "viridis")
+    node_kwargs["node_size"] = node_kwargs.get(
+        "node_size", [1 + degree * 10 for _, degree in G.degree]
+    )
+    scatter = nx.draw_networkx_nodes(G, pos, ax=ax, **node_kwargs)
+    if legend:
+        if ax:
+            ax.legend(*scatter.legend_elements("sizes"))
+        else:
+            plt.legend(*scatter.legend_elements("sizes"))
 
+    edge_kwargs = edge_kwargs or {}
     edges_attrs = graph_edges_attributes(G)
-    if "polarity" in edges_attrs:
+    if (
+        not "edge_color" in edge_kwargs
+        and not "edge_cmap" in edge_kwargs
+        and "polarity" in edges_attrs
+    ):
         # we draw the polarity of interactions if the 'polarity'
         # attribute is present in the graph
         polarities = [d.get("polarity", 0) for *_, d in G.edges.data()]  # type: ignore
-        edge_color = ["g" if p > 0 else "r" for p in polarities]
-        edge_cmap = None
-
+        edge_kwargs["edge_color"] = ["g" if p > 0 else "r" for p in polarities]
+        edge_kwargs["edge_cmap"] = None
     else:
-        edge_color = [math.log(d["weight"]) for *_, d in G.edges.data()]
-        edge_cmap = plt.get_cmap("winter_r")
-    nx.draw_networkx_edges(
-        G,
-        pos,
-        edge_color=edge_color,
-        edge_cmap=edge_cmap,
-        edge_vmax=1,
-        edge_vmin=-1,
-        width=[1 + math.log(d["weight"]) for _, _, d in G.edges.data()],  # type: ignore
-        alpha=0.35,
-        ax=ax,
+        edge_kwargs["edge_color"] = edge_kwargs.get(
+            "edge_color", [math.log(d.get("weight", 1)) for *_, d in G.edges.data()]
+        )
+        edge_kwargs["edge_cmap"] = edge_kwargs.get("edge_cmap", plt.get_cmap("viridis"))
+    edge_kwargs["width"] = edge_kwargs.get(
+        "width", [1 + math.log(d.get("weight", 1)) for _, _, d in G.edges.data()]
     )
+    edge_kwargs["alpha"] = edge_kwargs.get("alpha", 0.35)
+    nx.draw_networkx_edges(G, pos, ax=ax, **edge_kwargs)
 
-    nx.draw_networkx_labels(
-        G, pos=pos, ax=ax, verticalalignment="top", font_size=8, alpha=0.75
-    )
+    label_kwargs = label_kwargs or {}
+    label_kwargs["verticalalignment"] = label_kwargs.get("verticalalignment", "top")
+    label_kwargs["font_size"] = label_kwargs.get("font_size", 8)
+    label_kwargs["alpha"] = label_kwargs.get("alpha", 0.75)
+    nx.draw_networkx_labels(G, pos=pos, ax=ax, **label_kwargs)
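
The rewrite above turns fixed drawing parameters into per-key defaults: a default is computed only when the caller has not supplied that key, so a single override keeps the remaining heuristics. A minimal usage sketch (the toy graph is illustrative, not from the package):

```python
import networkx as nx
import matplotlib.pyplot as plt
from renard.plot_utils import plot_nx_graph_reasonably

# A toy weighted graph; edge weights drive the default edge color and width.
G = nx.Graph()
G.add_edge("Elizabeth", "Darcy", weight=3)
G.add_edge("Elizabeth", "Jane", weight=5)

fig, ax = plt.subplots()
# Override only the node colormap; node_color, node_size, edge and
# label styling keep their computed defaults.
plot_nx_graph_reasonably(G, ax=ax, node_kwargs={"cmap": "plasma"}, legend=True)
plt.show()
```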
renard/resources/determiners/__init__.py ADDED
@@ -0,0 +1 @@
+from renard.resources.determiners.determiners import *
renard/resources/determiners/determiners.py ADDED
@@ -0,0 +1,41 @@
+singular_determiners = {
+    "eng": {
+        "a",
+        "some",
+        "the",
+        "his",
+        "her",
+        "my",
+        "their",
+        "this",
+        "that",
+        "its",
+        "our",
+        "your",
+        "such",
+    },
+    "fra": {
+        "le",
+        "la",
+        "les",
+        "un",
+        "une",
+        "du",
+        "de",
+        "de la",
+        "ce",
+        "cette",
+        "mon",
+        "ma",
+        "ton",
+        "ta",
+        "son",
+        "sa",
+        "notre",
+        "votre",
+        "leur",
+        "au",
+        "à",
+        "l '",
+    },
+}
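
This diff does not show how the determiner sets are consumed; the helper below is a purely hypothetical illustration of the kind of mention normalization such per-language sets enable (`strip_determiner` is not part of Renard):

```python
from renard.resources.determiners.determiners import singular_determiners

def strip_determiner(mention: str, lang: str = "eng") -> str:
    """Hypothetical helper: drop a leading determiner from a mention."""
    # Try longer determiners first so that "de la" wins over "de".
    for det in sorted(singular_determiners.get(lang, set()), key=len, reverse=True):
        if mention.lower().startswith(det + " "):
            return mention[len(det) + 1:]
    return mention

assert strip_determiner("the captain") == "captain"
assert strip_determiner("la capitaine", lang="fra") == "capitaine"
```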
renard/resources/hypocorisms/hypocorisms.py CHANGED
@@ -1,6 +1,6 @@
 from typing import Dict, List, Set, Tuple
 from collections import defaultdict
-import os
+import os, sys
 
 script_dir = os.path.dirname(os.path.abspath(__file__))
 
@@ -24,7 +24,8 @@ class HypocorismGazetteer:
         """
         if not lang in HypocorismGazetteer.supported_langs:
             print(
-                f"[warning] {lang} not supported by {type(self)} (supported languages: {HypocorismGazetteer.supported_langs})"
+                f"[warning] {lang} not supported by {type(self)} (supported languages: {HypocorismGazetteer.supported_langs})",
+                file=sys.stderr,
             )
 
         self.name_to_nicknames = defaultdict(set)
renard/utils.py CHANGED
@@ -1,4 +1,5 @@
-from typing import List, Tuple, TypeVar, Collection, Iterable, cast
+from typing import List, Literal, Tuple, TypeVar, Collection, Iterable, cast, Union
+import sys
 from more_itertools.more import windowed
 import torch
 
@@ -76,3 +77,58 @@ def search_pattern(seq: Iterable[R], pattern: List[R]) -> List[int]:
         if list(subseq) == pattern:
             start_indices.append(subseq_i)
     return start_indices
+
+
+#: A `BlockBounds` delimits blocks in either raw text ("characters") or
+#: tokenized text ("tokens"). It has the following form:
+#:
+#:     ([(block start, block end), ...], unit)
+#:
+#: see :func:`block_bounds` to easily create `BlockBounds`
+BlockBounds = Tuple[List[Tuple[int, int]], Literal["characters", "tokens"]]
+
+
+def block_bounds(blocks: Union[List[str], List[List[str]]]) -> BlockBounds:
+    """Return the boundaries of a series of blocks.
+
+    :param blocks: either a list of raw texts or a list of tokenized
+        texts.
+
+    :return: A `BlockBounds` with the correct unit.
+    """
+    if len(blocks) == 0:
+        print("[warning] computing block bounds on 0 blocks.", file=sys.stderr)
+        return ([], ("characters"))
+
+    if isinstance(blocks[0], str):
+        unit = "characters"
+    elif isinstance(blocks[0], list):
+        unit = "tokens"
+    else:
+        raise ValueError(blocks)
+
+    indices = []
+    start = 0
+    for block in blocks:
+        end = start + len(block)
+        indices.append((start, end))
+        start = end
+
+    return (indices, unit)
+
+
+def charbb2tokenbb(char_bb: BlockBounds, char2token: List[int]) -> BlockBounds:
+    """Convert a `BlockBounds` in characters to a `BlockBounds` in
+    tokens.
+
+    :param char_bb: block bounds, in 'characters'.
+    :param char2token: a list with ``char2token[i]`` being the index
+        of the token corresponding to character ``i``.
+
+    :return: a `BlockBounds`, in 'tokens'.
+    """
+    assert char_bb[1] == "characters"
+    tokens_blocks = []
+    for char_block_start, char_block_end in char_bb[0]:
+        tokens_blocks.append((char2token[char_block_start], char2token[char_block_end]))
+    return (tokens_blocks, "tokens")
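
`block_bounds` infers its unit from the element type, which is why the same `BlockBounds` alias can describe both raw and tokenized chapters; `charbb2tokenbb` then maps character bounds to token bounds through the tokenizer's new `char2token` output. A minimal sketch of the unit inference, with toy blocks:

```python
from renard.utils import block_bounds

# Raw-text blocks: str elements, so bounds are in characters.
chapters = ["Ab cd. ", "Ef gh."]
assert block_bounds(chapters) == ([(0, 7), (7, 13)], "characters")

# Tokenized blocks: list elements, so bounds are in tokens.
tokenized = [["Ab", "cd", "."], ["Ef", "gh", "."]]
assert block_bounds(tokenized) == ([(0, 3), (3, 6)], "tokens")
```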
{renard_pipeline-0.4.2.dist-info → renard_pipeline-0.6.0.dist-info}/METADATA RENAMED
@@ -1,46 +1,49 @@
 Metadata-Version: 2.1
 Name: renard-pipeline
-Version: 0.4.2
+Version: 0.6.0
 Summary: Relationships Extraction from NARrative Documents
 Home-page: https://github.com/CompNet/Renard
 License: GPL-3.0-only
 Author: Arthur Amalvy
 Author-email: arthur.amalvy@univ-avignon.fr
-Requires-Python: >=3.8,<3.11
+Requires-Python: >=3.8,<3.12
 Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
 Provides-Extra: spacy
 Provides-Extra: stanza
-Requires-Dist: coreferee (>=1.4.0,<2.0.0) ; extra == "spacy"
-Requires-Dist: datasets (>=2.16.1,<3.0.0)
-Requires-Dist: grimbert (>=0.1.0,<0.2.0)
-Requires-Dist: matplotlib (>=3.5.3,<4.0.0)
-Requires-Dist: more-itertools (>=10.1.0,<11.0.0)
-Requires-Dist: nameparser (>=1.1.0,<2.0.0)
-Requires-Dist: networkx (>=2.6.3,<3.0.0)
-Requires-Dist: nltk (>=3.6.5,<4.0.0)
-Requires-Dist: pandas (>=2.0.0,<3.0.0)
-Requires-Dist: pytest (>=7.2.1,<8.0.0)
-Requires-Dist: seqeval (==1.2.2)
-Requires-Dist: spacy (>=3.5.0,<4.0.0) ; extra == "spacy"
-Requires-Dist: spacy-transformers (>=1.2.1,<2.0.0) ; extra == "spacy"
-Requires-Dist: stanza (>=1.3.0,<2.0.0) ; extra == "stanza"
-Requires-Dist: tibert (>=0.3.0,<0.4.0)
+Requires-Dist: coreferee (>=1.4,<2.0) ; extra == "spacy"
+Requires-Dist: datasets (>=3.0,<4.0)
+Requires-Dist: grimbert (>=0.1,<0.2)
+Requires-Dist: matplotlib (>=3.5,<4.0)
+Requires-Dist: more-itertools (>=10.5,<11.0)
+Requires-Dist: nameparser (>=1.1,<2.0)
+Requires-Dist: networkx (>=3.0,<4.0)
+Requires-Dist: nltk (>=3.9,<4.0)
+Requires-Dist: pandas (>=2.0,<3.0)
+Requires-Dist: pytest (>=8.3.0,<9.0.0)
+Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0)
+Requires-Dist: spacy (>=3.5,<4.0) ; extra == "spacy"
+Requires-Dist: spacy-transformers (>=1.3,<2.0) ; extra == "spacy"
+Requires-Dist: stanza (>=1.3,<2.0) ; extra == "stanza"
+Requires-Dist: tibert (>=0.5,<0.6)
 Requires-Dist: torch (>=2.0.0,!=2.0.1)
 Requires-Dist: tqdm (>=4.62.3,<5.0.0)
-Requires-Dist: transformers (>=4.36.0,<5.0.0)
+Requires-Dist: transformers (>=4.36,<5.0)
 Project-URL: Documentation, https://compnet.github.io/Renard/
 Project-URL: Repository, https://github.com/CompNet/Renard
 Description-Content-Type: text/markdown
 
 # Renard
 
-Renard (Relationships Extraction from NARrative Documents) is a library for creating and using custom character networks extraction pipelines. Renard can extract dynamic as well as static character networks.
+[![DOI](https://joss.theoj.org/papers/10.21105/joss.06574/status.svg)](https://doi.org/10.21105/joss.06574)
 
-![Character network extracted from "Pride and Prejudice"](./docs/pp_white_bg.svg)
+Renard (Relationship Extraction from NARrative Documents) is a library for creating and using custom character networks extraction pipelines. Renard can extract dynamic as well as static character networks.
+
+![The Renard logo](./docs/renard.svg)
 
 
 # Installation
@@ -102,3 +105,25 @@ Expensive tests are disabled by default. These can be run by setting the environ
 
 see [the "Contributing" section of the documentation](https://compnet.github.io/Renard/contributing.html).
 
+
+# How to cite
+
+If you use Renard in your research project, please cite it as follows:
+
+```bibtex
+@Article{Amalvy2024,
+  doi = {10.21105/joss.06574},
+  year = {2024},
+  publisher = {The Open Journal},
+  volume = {9},
+  number = {98},
+  pages = {6574},
+  author = {Amalvy, A. and Labatut, V. and Dufour, R.},
+  title = {Renard: A Modular Pipeline for Extracting Character
+    Networks from Narrative Texts},
+  journal = {Journal of Open Source Software},
+}
+```
+
+We would be happy to hear about your usage of Renard, so don't hesitate to reach out!
+
renard_pipeline-0.6.0.dist-info/RECORD ADDED
@@ -0,0 +1,39 @@
+renard/gender.py,sha256=HDtJQKOqIkV8F-Mxva95XFXWJoKRKckQ3fc93OBM6sw,102
+renard/graph_utils.py,sha256=EV0_56KtI3VOElCu7wxd2kL8QVPsOu7itE6wGJAJsNA,6073
+renard/ner_utils.py,sha256=SFZoyJM6c2avE7-NDkCSzkx-O8ppzS00a8EyHt64iGI,11628
+renard/nltk_utils.py,sha256=mUJiwMrEDZV4Fla7WuMR-hA_OC2ZIwSXgW_0Ew18VSo,977
+renard/pipeline/__init__.py,sha256=8Yim2mmny8YGvM7N5-na5zK-C9UDxUb77K9ml-VirUA,35
+renard/pipeline/character_unification.py,sha256=SsMaBHfGgRAvZyYbVcm6pxnIqHqD_JyQndGvwSjsGCc,17074
+renard/pipeline/characters_extraction.py,sha256=bMic8dtlYKUmAlTzQqDPraYy5VsGWoGkho35mA8w3_Y,396
+renard/pipeline/core.py,sha256=LILUIQZp9f3FzqjBocUS7dKzX7lHQQVdL29jyqU1UeY,27754
+renard/pipeline/corefs/__init__.py,sha256=9c9AaXBcRrDBf1jhTtJ7DyjOJhX_Zej3FjlcGak7MK8,44
+renard/pipeline/corefs/corefs.py,sha256=d47Sd8ekwhQQV6rQ0F9QyAX2GOTqUnkDUA-eKgMtMS4,11417
+renard/pipeline/graph_extraction.py,sha256=Ga3wfUW9tDtatcTv2taLrNky9jz2wUwZ8uzoXJoSVk8,22928
+renard/pipeline/ner/__init__.py,sha256=Dqxcf_EKhK1UwiCscZ3gGHInlcxJyvpR4o-ZCLEyV48,38
+renard/pipeline/ner/ner.py,sha256=8zUtaqaGNirfGFRyMpDzdqtO3abrRLyLtjmwnqBNwUI,9893
+renard/pipeline/ner/retrieval.py,sha256=JIU3fi0Q1gl_YGP6kYx6zC9xz4UN6gnqdVuzWVXzzyM,12853
+renard/pipeline/preconfigured.py,sha256=j4-0OUZrmtC8rQfwGWEAAGNxc8-4hlY7N823Uami5lk,5392
+renard/pipeline/preprocessing.py,sha256=OsdsYzmRweAiQV_CtP7uiz--OGogZtQlsdR8XX5DCk0,952
+renard/pipeline/progress.py,sha256=PJ174ssaqr5qHaTrVQ8HqJtvpvX6QhtHM5PHT893_Xk,2689
+renard/pipeline/quote_detection.py,sha256=FyldJhynIT843fB7rwVtHmDZJqTKkjGml6qTLjsIhMA,2045
+renard/pipeline/sentiment_analysis.py,sha256=76MPin4L1-vSswJe5yGrbCSSDim1LYxSEgNj_BdQDvk,1464
+renard/pipeline/speaker_attribution.py,sha256=Uts6JdUo_sbWyIb2AJ6SO5JuUbgROIpcbUNTg4dHo4U,4329
+renard/pipeline/stanford_corenlp.py,sha256=14b6Ee6oPz1EL-bNRT688aNxVTk_Jwa_vJ20FiBODC4,8189
+renard/pipeline/tokenization.py,sha256=BzLBG_QndbLLf2VtZtkIsFSbB0whvgrI4_hzVw_jxZY,2910
+renard/plot_utils.py,sha256=qsQI-wbk_5KCXDvt1tPerq4UW4VWLrJpoCet4qkONwE,3344
+renard/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+renard/resources/determiners/__init__.py,sha256=dAcx2hWb_aAd5Rv9rif7CQOvjKcSdIY_mCXJBQQtw60,55
+renard/resources/determiners/determiners.py,sha256=lQ5XGmKWK8h6dcBp0tB2TcEJbkQ9KCHkACJ_gqWjexU,594
+renard/resources/hypocorisms/__init__.py,sha256=vlsY9PqxQCIpijxm79Y0KYh2c0S4S1pgrC9w-AUQGvE,55
+renard/resources/hypocorisms/datas/License.txt,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+renard/resources/hypocorisms/datas/hypocorisms.csv,sha256=CKTo7A5i14NzN6JRBz7U2NJnxrEo8VOlmmdhzEZnqlI,21470
+renard/resources/hypocorisms/hypocorisms.py,sha256=rFFKKr-rEsd5wbz_SYjadrgKdEWwxMwVR1NQu_wcPqI,2887
+renard/resources/pronouns/__init__.py,sha256=62h0zuXp8kCToTLTyg8D8rJ-MXQpT8Vyc6mljcD1RGU,49
+renard/resources/pronouns/pronouns.py,sha256=YJ8hM6H8QHrF2Xx6O5blqc-Sqe1D1YFL0sRdqO_rroE,817
+renard/resources/titles/__init__.py,sha256=Jcg4B7stsWiAaXbFgNl_L3ICtCQmFe9bo3YjdkVL50w,45
+renard/resources/titles/titles.py,sha256=GsFccVJuTkgDWiAqWZpFd2R9pGvFKQZBOk4RWWuWDkw,968
+renard/utils.py,sha256=WL6djr3iu5Kzo2Jq6qDllHXgvZcEnmqBxPkQf1drq7c,4072
+renard_pipeline-0.6.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+renard_pipeline-0.6.0.dist-info/METADATA,sha256=e-pQfaGqbCfJ0ObiQBYhMY1WMwXCFm69t6Q31AwC_DA,4381
+renard_pipeline-0.6.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+renard_pipeline-0.6.0.dist-info/RECORD,,
@@ -1,35 +0,0 @@
1
- renard/gender.py,sha256=HDtJQKOqIkV8F-Mxva95XFXWJoKRKckQ3fc93OBM6sw,102
2
- renard/graph_utils.py,sha256=5jwky9JgJ-WMVHfeaiXkAAQwEfhR2BFSrWhck1Qmpgo,5812
3
- renard/ner_utils.py,sha256=jN1AQkaV0Kx-Bc0oc3SYBEmSUuKPBbzXqByOlaqH62k,11263
4
- renard/nltk_utils.py,sha256=mUJiwMrEDZV4Fla7WuMR-hA_OC2ZIwSXgW_0Ew18VSo,977
5
- renard/pipeline/__init__.py,sha256=8Yim2mmny8YGvM7N5-na5zK-C9UDxUb77K9ml-VirUA,35
6
- renard/pipeline/character_unification.py,sha256=GJvPKw2zSMi0RpLLVlKsu7ewpxkrdxytND9PLxolbP4,15252
7
- renard/pipeline/characters_extraction.py,sha256=NzF8H9X19diW6rqwS5ERrRku7rFueO3S077H5C6kb7I,363
8
- renard/pipeline/core.py,sha256=luKNUTCDtZfwKzxVIaImyIMwFFvIknfT1LdQtongj24,22570
9
- renard/pipeline/corefs/__init__.py,sha256=9c9AaXBcRrDBf1jhTtJ7DyjOJhX_Zej3FjlcGak7MK8,44
10
- renard/pipeline/corefs/corefs.py,sha256=nzYT6S9ify3FlgGB3FSDpAhs2UQYgW9c3CL2GRYzTms,11508
11
- renard/pipeline/graph_extraction.py,sha256=n0T_nzNGiwE9bDubpPknHe7bbDhJ4ndnqmoMmyfbeWg,19468
12
- renard/pipeline/ner.py,sha256=5zqZlEjhO__0iuRQAN9rvhCbcd9QmNCcH9_NP_BaTbc,11261
13
- renard/pipeline/preconfigured.py,sha256=j4-0OUZrmtC8rQfwGWEAAGNxc8-4hlY7N823Uami5lk,5392
14
- renard/pipeline/preprocessing.py,sha256=OsdsYzmRweAiQV_CtP7uiz--OGogZtQlsdR8XX5DCk0,952
15
- renard/pipeline/progress.py,sha256=VQsIxTuz0QQnepXPevHhMU-dHXMa1RWsjmMfBgoWdiY,1684
16
- renard/pipeline/quote_detection.py,sha256=FyldJhynIT843fB7rwVtHmDZJqTKkjGml6qTLjsIhMA,2045
17
- renard/pipeline/sentiment_analysis.py,sha256=76MPin4L1-vSswJe5yGrbCSSDim1LYxSEgNj_BdQDvk,1464
18
- renard/pipeline/speaker_attribution.py,sha256=qCY-Z1haDDgZy8L4k8pAc6xIcSFmtcuuESu631QxRUY,4366
19
- renard/pipeline/stanford_corenlp.py,sha256=14b6Ee6oPz1EL-bNRT688aNxVTk_Jwa_vJ20FiBODC4,8189
20
- renard/pipeline/tokenization.py,sha256=RllOxSjaV_Sdu3CH8vKIbceNj3Noeey31mKircxWoyM,1806
21
- renard/plot_utils.py,sha256=bmIBybleFJ-YiVPLPPWYW8x1UHpkuXTE7O9lQlRiWrk,2133
22
- renard/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
23
- renard/resources/hypocorisms/__init__.py,sha256=vlsY9PqxQCIpijxm79Y0KYh2c0S4S1pgrC9w-AUQGvE,55
24
- renard/resources/hypocorisms/datas/License.txt,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
25
- renard/resources/hypocorisms/datas/hypocorisms.csv,sha256=CKTo7A5i14NzN6JRBz7U2NJnxrEo8VOlmmdhzEZnqlI,21470
26
- renard/resources/hypocorisms/hypocorisms.py,sha256=vgqdKTpOvz6pqWeCpc25FirLy4yFsYdFSyf_nNgmfQw,2848
27
- renard/resources/pronouns/__init__.py,sha256=62h0zuXp8kCToTLTyg8D8rJ-MXQpT8Vyc6mljcD1RGU,49
28
- renard/resources/pronouns/pronouns.py,sha256=YJ8hM6H8QHrF2Xx6O5blqc-Sqe1D1YFL0sRdqO_rroE,817
29
- renard/resources/titles/__init__.py,sha256=Jcg4B7stsWiAaXbFgNl_L3ICtCQmFe9bo3YjdkVL50w,45
30
- renard/resources/titles/titles.py,sha256=GsFccVJuTkgDWiAqWZpFd2R9pGvFKQZBOk4RWWuWDkw,968
31
- renard/utils.py,sha256=8J3swFqSi4YqhgYNXvttJ0s-DmJbl_yEYri6JpGEWH8,2340
32
- renard_pipeline-0.4.2.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
33
- renard_pipeline-0.4.2.dist-info/METADATA,sha256=R1ZbG6Mdk1B5Zk73QSKB-lZu7rDnvWKe3M5JiDqPFxM,3697
34
- renard_pipeline-0.4.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
35
- renard_pipeline-0.4.2.dist-info/RECORD,,