TLDR-635 retrain paragraph and diploma classifiers
TLDR-635 refresh code for old datasets; rewrite ParagraphFeatureExtractor

TLDR-635 fix TableAnnotation

TLDR-635 delete FirstWordFeatures

TLDR-635 delete FeatureExtractors.fit(..) and fit_transform(..)

TLDR-635 move LineEpsDataSet into line_lstm_classifier_trainer.py

TLDR-635 rewrite LineWithMetaExtraction

TLDR-635 retrain base line classifier

TLDR-635 fixed paragraph tests

TLDR-635 refactor TocFeatureExtractor

TLDR-635 retrain diploma classifier

TLDR-635 rewrite paragraph features: add normalization of indent_prev, indent_next, indent_prev_right; remove local normalization by page_width

TLDR-635 retrain paragraph: change calculation of uppercase-letter percentage and is_capitalize

TLDR-635 retrain paragraphs: added bold features

TLDR-635 retrain paragraphs: added list features

TLDR-635 retrain paragraph classifier

TLDR-635 retrain paragraph classifier

TLDR-635 retrain paragraph classifier

TLDR-635 fixed labeling tests
oksidgy committed Aug 9, 2024
1 parent 4921d67 commit afc5412
Showing 55 changed files with 1,049 additions and 725 deletions.
4 changes: 3 additions & 1 deletion README.md
@@ -17,8 +17,10 @@ It extracts a document’s logical structure and content: tables, text formattin
The document’s content is represented as a tree storing headings and lists of any level.
Dedoc can be integrated in a document contents and structure analysis system as a separate module.

## Workflow
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=ispras/dedoc&type=Date)](https://star-history.com/#ispras/dedoc&Date)

## Workflow
![Workflow](https://github.com/ispras/dedoc/raw/master/docs/source/_static/workflow.png)

Workflow description is given [`here`](https://dedoc.readthedocs.io/en/latest/?badge=latest#workflow)
@@ -8,10 +8,10 @@ class TableAnnotation(Annotation):
"""
name = "table"

def __init__(self, name: str, start: int, end: int) -> None:
def __init__(self, value: str, start: int, end: int) -> None:
"""
:param name: unique identifier of the table which is referenced inside this annotation
:param value: unique identifier of the table which is referenced inside this annotation
:param start: start of the annotated text (usually zero)
:param end: end of the annotated text (usually end of the line)
"""
super().__init__(start=start, end=end, name=TableAnnotation.name, value=name, is_mergeable=False)
super().__init__(start=start, end=end, name=TableAnnotation.name, value=value, is_mergeable=False)
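
For reference, a minimal hedged sketch of the renamed keyword in use; the table uid and line text are made-up placeholders, and the import path is assumed from the repository layout rather than taken from this diff:

```python
# Hedged sketch of the renamed TableAnnotation keyword (name -> value).
# "table_uid" is a hypothetical identifier; real uids come from the parsed tables.
from dedoc.data_structures.concrete_annotations.table_annotation import TableAnnotation

table_uid = "table-0000-uid"
line_text = "Results are summarized in the table below."
annotation = TableAnnotation(value=table_uid, start=0, end=len(line_text))
assert annotation.name == "table" and annotation.value == table_uid
```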
3 changes: 3 additions & 0 deletions dedoc/data_structures/line_with_meta.py
@@ -136,6 +136,9 @@ def uid(self) -> str:
def set_line(self, line: str) -> None:
self._line = line

def set_metadata(self, metadata: LineMetadata) -> None:
self._metadata = metadata

def __repr__(self) -> str:
return (f"LineWithMeta({self.line[:65]}, "
f"tagHL={self.metadata.tag_hierarchy_level.level_1, self.metadata.tag_hierarchy_level.level_2, self.metadata.tag_hierarchy_level.line_type})")
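A short hedged sketch of the new setter next to the existing set_line; the constructor arguments below are assumptions about the public LineWithMeta/LineMetadata API, not part of this commit:

```python
# Hedged sketch: replacing a line's metadata object wholesale via the new setter.
# Constructor arguments are assumptions, not taken from this diff.
from dedoc.data_structures.line_metadata import LineMetadata
from dedoc.data_structures.line_with_meta import LineWithMeta

line = LineWithMeta(line="1.1 Scope of the document")
line.set_line("1.1 Scope")                             # existing setter
line.set_metadata(LineMetadata(page_id=0, line_id=3))  # setter added in this commit
print(line.metadata.page_id, line.metadata.line_id)
```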
4 changes: 2 additions & 2 deletions dedoc/download_models.py
@@ -8,8 +8,8 @@
txtlayer_classifier="9ca1de749d8d37147b00a3a228e03ee1776c695f",
scan_orientation_efficient_net_b0="c60812552a1be624476c1e5b58599867b36f8d4e",
font_classifier="db4481ad60ab050cbb42079b64f97f9e431feb07",
paragraph_classifier="c26a10193499d3cbc77ffec9842bece24fa8950b",
line_type_classifiers="0568c6e1f49612c0c351f10b80a26dc05f796683",
paragraph_classifier="97c4b78bc20d87ec7d53389e09f1ca35c6ade067",
line_type_classifiers="18df71c2e5d6985769d2d2dea6902e3e0a1fc9fe",
fintoc_classifiers="6a907b7d2437c3f61ac9c506f67175207982fae8"
)

2 changes: 1 addition & 1 deletion dedoc/readers/article_reader/article_reader.py
@@ -235,7 +235,7 @@ def __create_line_with_refs(self, content: List[Tuple[str, Tag]], bib2uid: dict,
if subpart.get("type") == "bibr" and target in bib2uid:
annotations.append(ReferenceAnnotation(value=bib2uid[target], start=start, end=start + len(sub_text)))
if subpart.get("type") == "table" and target in table2uid:
annotations.append(TableAnnotation(name=table2uid[target], start=start, end=start + len(sub_text)))
annotations.append(TableAnnotation(value=table2uid[target], start=start, end=start + len(sub_text)))
if subpart.get("type") == "figure" and target in attachment2uid:
annotations.append(AttachAnnotation(attach_uid=attachment2uid[target], start=start, end=start + len(sub_text)))
else:
2 changes: 1 addition & 1 deletion dedoc/readers/docx_reader/data_structures/docx_document.py
@@ -110,7 +110,7 @@ def __paragraphs2lines(self, image_refs: dict, table_refs: dict, diagram_refs: d

if i in table_refs:
for table_uid in table_refs[i]:
annotation = TableAnnotation(name=table_uid, start=0, end=len(line))
annotation = TableAnnotation(value=table_uid, start=0, end=len(line))
line.annotations.append(annotation)

paragraph_id += 1
2 changes: 1 addition & 1 deletion dedoc/readers/html2pdf_reader/html2pdf_reader.py
@@ -47,7 +47,7 @@ def _add_tables(self, document: UnstructuredDocument, tables: Dict[str, Table])
line_id += 1
lines.append(line)
elif previous_line is not None:
table_annotation = TableAnnotation(name=table_uid, start=0, end=len(line.line))
table_annotation = TableAnnotation(value=table_uid, start=0, end=len(line.line))
previous_line.annotations.append(table_annotation)
tables_result.append(tables[table_uid])
return UnstructuredDocument(lines=lines, tables=tables_result, attachments=document.attachments)
2 changes: 1 addition & 1 deletion dedoc/readers/pdf_reader/data_classes/page_with_bboxes.py
@@ -8,7 +8,7 @@

class PageWithBBox:

def __init__(self, image: ndarray, bboxes: List[TextWithBBox], page_num: int, attachments: List[PdfImageAttachment] = None,
def __init__(self, image: Optional[ndarray], bboxes: List[TextWithBBox], page_num: int, attachments: List[PdfImageAttachment] = None,
pdf_page_width: Optional[int] = None, pdf_page_height: Optional[int] = None) -> None:
self.image = image
self.bboxes = bboxes
@@ -7,29 +7,19 @@

class TxtlayerFeatureExtractor:

def __init__(self) -> None:
self.eng = "".join(list(map(chr, range(ord("a"), ord("z") + 1))))
self.rus = "".join([chr(i) for i in range(ord("а"), ord("а") + 32)] + ["ё"])
self.lower_letters = self.eng + self.rus
self.upper_letters = self.lower_letters.upper()
self.letters = self.upper_letters + self.lower_letters
self.digits = "".join([str(i) for i in range(10)])
self.special_symbols = "<>~!@#$%^&*_+-/\"|?.,:;'`= "
self.brackets = "{}[]()"
self.symbols = self.letters + self.digits + self.brackets + self.special_symbols

self.prohibited_symbols = {s: i for i, s in enumerate("[]<")}

def transform(self, texts: List[str]) -> pd.DataFrame:
from dedoc.structure_extractors.feature_extractors.char_features import letters, digits, special_symbols, brackets, rus, eng, prohibited_symbols, \
lower_letters, upper_letters, symbols, count_symbols

features = defaultdict(list)

for text in texts:
num_letters = self.__count_symbols(text, self.letters)
num_digits = self.__count_symbols(text, self.digits)
num_special_symbols = self.__count_symbols(text, self.special_symbols)
num_brackets = self.__count_symbols(text, self.brackets)
num_rus = self.__count_symbols(text, self.rus + self.rus.upper())
num_eng = self.__count_symbols(text, self.eng + self.eng.upper())
num_letters = count_symbols(text, letters)
num_digits = count_symbols(text, digits)
num_special_symbols = count_symbols(text, special_symbols)
num_brackets = count_symbols(text, brackets)
num_rus = count_symbols(text, rus + rus.upper())
num_eng = count_symbols(text, eng + eng.upper())

features["letters_proportion"].append(num_letters / len(text))
features["digits_proportion"].append(num_digits / len(text))
@@ -38,24 +28,24 @@ def transform(self, texts: List[str]) -> pd.DataFrame:
features["rus_proportion"].append(num_rus / len(text))
features["eng_proportion"].append(num_eng / len(text))

for symbol in self.letters + self.digits:
for symbol in letters + digits:
n = num_letters + num_digits
# proportion of occurring english and russian letters
features[f"{symbol}_proportion"].append(text.count(symbol) / n if n != 0 else 0.0)

for symbol in self.special_symbols + self.brackets:
for symbol in special_symbols + brackets:
# number of symbols
symbol_name = symbol if symbol not in self.prohibited_symbols else f"symbol{self.prohibited_symbols[symbol]}"
symbol_name = symbol if symbol not in prohibited_symbols else f"symbol{prohibited_symbols[symbol]}"
features[f"{symbol_name}_number"].append(text.count(symbol))

# proportion of letters with symbols
features["all_proportion"].append((num_letters + num_digits + num_brackets + num_special_symbols) / len(text) if len(text) != 0 else 0)

case_changes = sum(1 for s1, s2 in zip(text[:-1], text[1:]) if (s1 in self.lower_letters) and (s2 in self.upper_letters))
case_changes = sum(1 for s1, s2 in zip(text[:-1], text[1:]) if (s1 in lower_letters) and (s2 in upper_letters))
features["case_changes"].append(case_changes / len(text))
symbol_changes = sum(1 for s1, s2 in zip(text[:-1], text[1:]) if (s1 in self.symbols) != (s2 in self.symbols))
symbol_changes = sum(1 for s1, s2 in zip(text[:-1], text[1:]) if (s1 in symbols) != (s2 in symbols))
features["symbol_changes"].append(symbol_changes / len(text))
letter_changes = sum(1 for s1, s2 in zip(text[:-1], text[1:]) if (s1 in self.letters) and (s2 not in self.symbols))
letter_changes = sum(1 for s1, s2 in zip(text[:-1], text[1:]) if (s1 in letters) and (s2 not in symbols))
features["letter_changes"].append(letter_changes / len(text))

features["mean_word_length"].append(np.mean([len(word) for word in text.split()]))
@@ -70,6 +60,3 @@ def transform(self, texts: List[str]) -> pd.DataFrame:
features["median_char_ord"].append(np.median(all_characters_ord))
features = pd.DataFrame(features)
return features[sorted(features.columns)].astype(float)

def __count_symbols(self, text: str, symbol_list: str) -> int:
return sum(1 for symbol in text if symbol in symbol_list)
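
The constants and the count_symbols helper that the rewritten transform() now imports were previously instance attributes of this class; below is a sketch of what the shared char_features module presumably provides, reconstructed from the deleted code above rather than copied from the new file:

```python
# Presumed contents of dedoc/structure_extractors/feature_extractors/char_features.py,
# reconstructed from the attributes and helper removed from TxtlayerFeatureExtractor.
eng = "".join(map(chr, range(ord("a"), ord("z") + 1)))
rus = "".join([chr(i) for i in range(ord("а"), ord("а") + 32)] + ["ё"])
lower_letters = eng + rus
upper_letters = lower_letters.upper()
letters = upper_letters + lower_letters
digits = "".join(str(i) for i in range(10))
special_symbols = "<>~!@#$%^&*_+-/\"|?.,:;'`= "
brackets = "{}[]()"
symbols = letters + digits + brackets + special_symbols
prohibited_symbols = {s: i for i, s in enumerate("[]<")}


def count_symbols(text: str, symbol_list: str) -> int:
    # number of characters of `text` that belong to `symbol_list`
    return sum(1 for symbol in text if symbol in symbol_list)
```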
@@ -57,7 +57,7 @@ def __get_f1_homogeneous(self, x: np.ndarray, x_clusters: np.ndarray) -> float:

w1 = np.std(x) * len(x)
w2 = np.std(x_clust0) * len(x_clust0) + np.std(x_clust1) * len(x_clust1)
f1 = w2 / w1
f1 = w2 / w1 if w1 != 0. else 0.
return f1

def __get_f_criterion_homogeneous(self, n: int, p: int = 2) -> float:
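The added guard covers the degenerate case of a constant feature column, where the overall spread is zero; a tiny self-contained illustration with local names:

```python
# Tiny illustration of the degenerate case the new guard handles: a constant
# column has zero spread, so w1 == 0 and f1 falls back to 0 instead of nan/inf.
import numpy as np

x = np.array([3.0, 3.0, 3.0, 3.0])
w1 = np.std(x) * len(x)   # 0.0 for a constant column
w2 = 0.0                  # within-cluster spread is also 0 here
f1 = w2 / w1 if w1 != 0. else 0.
print(f1)                 # 0.0
```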
@@ -7,12 +7,13 @@ def __init__(self, n: int = 5) -> None:
self.n = n

def binarize(self, image: np.ndarray) -> np.ndarray:
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
threshold = self.__get_threshold(gray_img)
if image.shape[-1] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
threshold = self.__get_threshold(image)

gray_img[gray_img <= threshold] = 0
gray_img[gray_img > threshold] = 1
return gray_img
image[image <= threshold] = 0
image[image > threshold] = 1
return image

def __get_threshold(self, gray_img: np.ndarray) -> int:
c, x = np.histogram(gray_img, bins=255)
@@ -33,8 +34,8 @@ def __get_threshold(self, gray_img: np.ndarray) -> int:
omega_1 = omega_1 + c[t] / total
omega_2 = 1 - omega_1
mu_k = mu_k + t * (c[t] / total)
mu_1 = mu_k / omega_1
mu_2 = (sum_val - mu_k) / omega_2
mu_1 = mu_k / omega_1 if omega_1 != 0. else 0.
mu_2 = (sum_val - mu_k) / omega_2 if omega_2 != 0. else 0.
sum_of_neighbors = np.sum(c[max(1, t - self.n):min(255, t + self.n)])
denom = total
current_var = (1 - sum_of_neighbors / denom) * (omega_1 * mu_1 ** 2 + omega_2 * mu_2 ** 2)
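The binarizer now skips the BGR-to-grayscale conversion when the input is already single-channel and thresholds the array in place; a self-contained sketch of that pattern (the fixed threshold is a placeholder, the real class derives it from a histogram as shown above):

```python
# Hedged sketch of the conversion-then-threshold pattern from the diff above.
# A fixed threshold stands in for the histogram-based one computed by __get_threshold.
import cv2
import numpy as np


def binarize_like_diff(image: np.ndarray, threshold: int = 128) -> np.ndarray:
    if image.shape[-1] == 3:                            # convert only true BGR inputs
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image[image <= threshold] = 0
    image[image > threshold] = 1
    return image


page = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # synthetic BGR page
mask = binarize_like_diff(page)
assert set(np.unique(mask)) <= {0, 1}
```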
@@ -49,7 +49,7 @@ def extract_metadata_and_set_annotations(self, page_with_lines: PageWithBBox, ca
lines = []
for bbox in page_with_lines.bboxes:
lines.append(LineMetadataExtractor.get_line_with_meta(bbox=bbox))
if page_with_lines.image.ndim == 3 and page_with_lines.image.shape[2] == 3:
if page_with_lines.image is not None and page_with_lines.image.ndim == 3 and page_with_lines.image.shape[2] == 3:
color_annotation = self.__get_color_annotation(bbox, page_with_lines.image)
bbox.annotations.append(color_annotation)
self.__add_spacing_annotations(lines)

This file was deleted.

@@ -1,5 +1,6 @@
import json
import logging
import numbers
import os
import tempfile
import zipfile
@@ -10,7 +11,7 @@
from dedoc.config import get_config
from dedoc.download_models import download_from_hub
from dedoc.readers.pdf_reader.data_classes.line_with_location import LineWithLocation
from dedoc.readers.pdf_reader.pdf_image_reader.paragraph_extractor.paragraph_features import ParagraphFeatureExtractor
from dedoc.structure_extractors.feature_extractors.paragraph_feature_extractor import ParagraphFeatureExtractor
from dedoc.utils.parameter_utils import get_param_gpu_available


@@ -67,8 +68,14 @@ def extract(self, lines_with_links: List[LineWithLocation]) -> List[LineWithLocation]:
labels = ["not_paragraph"] * len(lines_with_links)
else:
labels = self.classifier.predict(data)

for label, line in zip(labels, lines_with_links):
if line.line.strip() == "":

if line.line.strip() == "" or label is None:
label = "not_paragraph"
elif isinstance(label, numbers.Integral):
label = self.classifier.classes_[label]

line.metadata.tag_hierarchy_level.can_be_multiline = label != "paragraph"

return lines_with_links
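
The extract() change falls back to "not_paragraph" for empty lines or missing predictions and maps integer predictions back to class names via the classifier's classes_ array; a minimal stand-alone sketch of that mapping (the class names and array are illustrative, not dedoc's trained model):

```python
# Hedged sketch of the label normalization added above; classes_ stands in for
# self.classifier.classes_ of an sklearn-style classifier whose predict() may
# return class indices instead of class names.
import numbers
import numpy as np

classes_ = np.array(["not_paragraph", "paragraph"])


def normalize_label(label, line_text: str) -> str:
    if line_text.strip() == "" or label is None:
        return "not_paragraph"
    if isinstance(label, numbers.Integral):   # integer index -> class name
        return str(classes_[label])
    return label


print(normalize_label(np.int64(1), "Some paragraph text"))  # paragraph
print(normalize_label(None, ""))                            # not_paragraph
```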
2 changes: 1 addition & 1 deletion dedoc/readers/pdf_reader/utils/line_object_linker.py
@@ -50,7 +50,7 @@ def link_objects(self, lines: List[LineWithLocation], tables: List[ScanTable], i
lines_after=object_with_lines["next_lines"],
last_page_line=last_page_line)
if isinstance(page_object, ScanTable):
annotation = TableAnnotation(name=page_object.uid, start=0, end=len(best_line.line))
annotation = TableAnnotation(value=page_object.uid, start=0, end=len(best_line.line))
elif isinstance(page_object, PdfImageAttachment):
annotation = AttachAnnotation(attach_uid=page_object.uid, start=0, end=len(best_line.line))
else: