
import csv
import io
import logging
import os
import subprocess
import tempfile
from collections.abc import Iterable
from pathlib import Path
from subprocess import DEVNULL, PIPE, Popen
from typing import List, Optional, Tuple, Type

import pandas as pd
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import TextCell

from docling.datamodel.accelerator_options import AcceleratorOptions
from docling.datamodel.base_models import Page
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
    OcrOptions,
    TesseractCliOcrOptions,
)
from docling.datamodel.settings import settings
from docling.models.base_ocr_model import BaseOcrModel
from docling.utils.ocr_utils import (
    map_tesseract_script,
    parse_tesseract_orientation,
    tesseract_box_to_bounding_rectangle,
)
from docling.utils.profiling import TimeRecorder

_log = logging.getLogger(__name__)


class TesseractOcrCliModel(BaseOcrModel):
    def __init__(
        self,
        enabled: bool,
        artifacts_path: Optional[Path],
        options: TesseractCliOcrOptions,
        accelerator_options: AcceleratorOptions,
    ):
        super().__init__(
            enabled=enabled,
            artifacts_path=artifacts_path,
            options=options,
            accelerator_options=accelerator_options,
        )
        self.options: TesseractCliOcrOptions

        self.scale = 3  # multiplier for 72 dpi == 216 dpi.

        self._name: Optional[str] = None
        self._version: Optional[str] = None
        self._tesseract_languages: Optional[List[str]] = None
        self._script_prefix: Optional[str] = None
        self._is_auto: bool = "auto" in self.options.lang

        if self.enabled:
            try:
                self._get_name_and_version()
                self._set_languages_and_prefix()
            except Exception as exc:
                raise RuntimeError(
                    f"Tesseract is not available, aborting: {exc} "
                    "Install Tesseract on your system and make sure the tesseract binary is discoverable. "
                    "The actual command for Tesseract can be specified in `pipeline_options.ocr_options.tesseract_cmd='tesseract'`. "
                    "Alternatively, Docling has support for other OCR engines. See the documentation."
                )

    def _get_name_and_version(self) -> Tuple[str, str]:
        if self._name is not None and self._version is not None:
            return self._name, self._version  # type: ignore

        cmd = [self.options.tesseract_cmd, "--version"]

        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()

        proc.wait()

        # HACK: Windows versions of Tesseract output the version to stdout,
        # Linux versions to stderr, so check both.
        version_line = (
            (stdout.decode("utf8").strip() or stderr.decode("utf8").strip())
            .split("\n")[0]
            .strip()
        )

        # If everything else fails...
        if not version_line:
            version_line = "tesseract XXX"

        name, version = version_line.split(" ")

        self._name = name
        self._version = version

        return name, version
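    # Illustrative note (not from the original source): the first line of
    # `tesseract --version` is expected to look like "tesseract 5.3.4", which
    # the split(" ") above unpacks into the engine name and the version string.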

    def _run_tesseract(self, ifilename: str, osd: pd.DataFrame):
        r"""
        Run tesseract CLI
        """
        cmd = [self.options.tesseract_cmd]

        if self._is_auto:
            lang = self._parse_language(osd)
            if lang is not None:
                cmd.append("-l")
                cmd.append(lang)
        elif self.options.lang is not None and len(self.options.lang) > 0:
            cmd.append("-l")
            cmd.append("+".join(self.options.lang))

        if self.options.path is not None:
            cmd.append("--tessdata-dir")
            cmd.append(self.options.path)

        cmd += [ifilename, "stdout", "tsv"]
        _log.info("command: {}".format(" ".join(cmd)))

        output = subprocess.run(cmd, stdout=PIPE, stderr=DEVNULL, check=True)
        # _log.info(output)

        # Decode the byte string to a regular string
        decoded_data = output.stdout.decode("utf-8")
        # _log.info(decoded_data)

        # Read the TSV file generated by Tesseract
        df_result = pd.read_csv(
            io.StringIO(decoded_data), quoting=csv.QUOTE_NONE, sep="\t"
        )

        # Filter rows that contain actual text (ignore header or empty rows)
        df_filtered = df_result[
            df_result["text"].notna() & (df_result["text"].apply(str).str.strip() != "")
        ]

        return df_filtered
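    # Illustrative sketch (not from the original source): the TSV produced by
    # `tesseract <image> stdout tsv` contains one row per recognized word with
    # roughly these columns, of which __call__ below consumes left, top, width,
    # height, conf and text:
    #
    #   level  page_num  block_num  par_num  line_num  word_num  left  top  width  height  conf  text
    #   5      1         1          1        1         1         64    72   120    24      96.1  Hello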

    def _perform_osd(self, ifilename: str) -> pd.DataFrame:
        r"""
        Run tesseract in PSM 0 mode to detect the orientation and script of the page
        """
        cmd = [self.options.tesseract_cmd]
        cmd.extend(["--psm", "0", "-l", "osd", ifilename, "stdout"])
        _log.info("command: {}".format(" ".join(cmd)))
        output = subprocess.run(cmd, capture_output=True, check=True)
        decoded_data = output.stdout.decode("utf-8")
        df_detected = pd.read_csv(
            io.StringIO(decoded_data), sep=":", header=None, names=["key", "value"]
        )
        return df_detected
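    # Illustrative sketch (not from the original source): the PSM 0 output parsed
    # above is a set of "key: value" lines, roughly of the form
    #
    #   Orientation in degrees: 0
    #   Rotate: 0
    #   Script: Latin
    #   Script confidence: 2.44
    #
    # _parse_language() reads the "Script" row and _parse_orientation() reads
    # "Orientation in degrees".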

    def _parse_language(self, df_osd: pd.DataFrame) -> Optional[str]:
        assert self._tesseract_languages is not None
        scripts = df_osd.loc[df_osd["key"] == "Script"].value.tolist()
        if len(scripts) == 0:
            _log.warning("Tesseract cannot detect the script of the page")
            return None

        script = map_tesseract_script(scripts[0].strip())
        lang = f"{self._script_prefix}{script}"

        # Check if the detected language has been installed
        if lang not in self._tesseract_languages:
            msg = f"Tesseract detected the script '{script}' and language '{lang}'."
            msg += " However, this language is not installed on your system and will be ignored."
            _log.warning(msg)
            return None

        _log.debug(
            f"Using tesseract model for the detected script '{script}' and language '{lang}'"
        )
        return lang

    def _set_languages_and_prefix(self):
        r"""
        Read and set the languages installed in tesseract and decide the script prefix
        """
        # Get all languages
        cmd = [self.options.tesseract_cmd]
        cmd.append("--list-langs")
        _log.info("command: {}".format(" ".join(cmd)))
        output = subprocess.run(cmd, stdout=PIPE, stderr=DEVNULL, check=True)
        decoded_data = output.stdout.decode("utf-8")
        df_list = pd.read_csv(io.StringIO(decoded_data), header=None)
        self._tesseract_languages = df_list[0].tolist()[1:]

        # Decide the script prefix
        if any(lang.startswith("script/") for lang in self._tesseract_languages):
            script_prefix = "script/"
        else:
            script_prefix = ""

        self._script_prefix = script_prefix
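    # Illustrative sketch (not from the original source): `tesseract --list-langs`
    # typically prints a header line followed by one language per line, e.g.
    #
    #   List of available languages (4):
    #   eng
    #   osd
    #   script/Latin
    #
    # which is why the first row is dropped above and why the "script/" prefix is
    # probed to build script-based language names such as "script/Latin".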

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        if not self.enabled:
            yield from page_batch
            return

        for page_i, page in enumerate(page_batch):
            assert page._backend is not None
            if not page._backend.is_valid():
                yield page
            else:
                with TimeRecorder(conv_res, "ocr"):
                    ocr_rects = self.get_ocr_rects(page)

                    all_ocr_cells = []
                    for ocr_rect_i, ocr_rect in enumerate(ocr_rects):
                        # Skip zero-area boxes
                        if ocr_rect.area() == 0:
                            continue
                        high_res_image = page._backend.get_page_image(
                            scale=self.scale, cropbox=ocr_rect
                        )
                        try:
                            with tempfile.NamedTemporaryFile(
                                suffix=".png", mode="w+b", delete=False
                            ) as image_file:
                                fname = image_file.name
                                high_res_image.save(image_file)
                            doc_orientation = 0
                            # Fallback OSD frame so the OCR call below still has a
                            # value if OSD fails in non-auto mode (df_osd would
                            # otherwise be unbound).
                            df_osd = pd.DataFrame({"key": [], "value": []})
                            try:
                                df_osd = self._perform_osd(fname)
                                doc_orientation = _parse_orientation(df_osd)
                            except subprocess.CalledProcessError as exc:
                                _log.error(
                                    "OSD failed (doc %s, page: %s, "
                                    "OCR rectangle: %s, processed image file %s):\n %s",
                                    conv_res.input.file,
                                    page_i,
                                    ocr_rect_i,
                                    image_file,
                                    exc.stderr,
                                )
                                # Skip this rectangle if OSD fails in auto mode;
                                # otherwise proceed to OCR in the hope that OCR
                                # succeeds even though OSD failed.
                                if self._is_auto:
                                    continue
                            if doc_orientation != 0:
                                high_res_image = high_res_image.rotate(
                                    -doc_orientation, expand=True
                                )
                                high_res_image.save(fname)
                            try:
                                df_result = self._run_tesseract(fname, df_osd)
                            except subprocess.CalledProcessError as exc:
                                _log.error(
                                    "tesseract OCR failed (doc %s, page: %s, "
                                    "OCR rectangle: %s, processed image file %s):\n %s",
                                    conv_res.input.file,
                                    page_i,
                                    ocr_rect_i,
                                    image_file,
                                    exc.stderr,
                                )
                                continue
                        finally:
                            if os.path.exists(fname):
                                os.remove(fname)

                        # _log.info(df_result)

                        # Build a TextCell from the bounding box, confidence and
                        # text columns of each recognized word.
                        for ix, row in df_result.iterrows():
                            text = row["text"]
                            conf = row["conf"]

                            left, top = float(row["left"]), float(row["top"])
                            right = left + float(row["width"])
                            bottom = top + float(row["height"])
                            bbox = BoundingBox(
                                l=left,
                                t=top,
                                r=right,
                                b=bottom,
                                coord_origin=CoordOrigin.TOPLEFT,
                            )
                            rect = tesseract_box_to_bounding_rectangle(
                                bbox,
                                original_offset=ocr_rect,
                                scale=self.scale,
                                orientation=doc_orientation,
                                im_size=high_res_image.size,
                            )
                            cell = TextCell(
                                index=ix,
                                text=str(text),
                                orig=str(text),
                                from_ocr=True,
                                confidence=conf / 100.0,
                                rect=rect,
                            )
                            all_ocr_cells.append(cell)

                    # Post-process the cells
                    page.cells = self.post_process_cells(all_ocr_cells, page.cells)

                # DEBUG code:
                if settings.debug.visualize_ocr:
                    self.draw_ocr_rects_and_cells(conv_res, page, ocr_rects)

                yield page

    @classmethod
    def get_options_type(cls) -> Type[OcrOptions]:
        return TesseractCliOcrOptions


def _parse_orientation(df_osd: pd.DataFrame) -> int:
    orientations = df_osd.loc[df_osd["key"] == "Orientation in degrees"].value.tolist()
    orientation = parse_tesseract_orientation(orientations[0].strip())
    return orientation
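

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): how this CLI-based
    # Tesseract engine would typically be selected through Docling's converter
    # API. The DocumentConverter / PdfFormatOption / PdfPipelineOptions names are
    # assumed from the wider docling package, and "sample.pdf" is a hypothetical
    # input path.
    from docling.datamodel.base_models import InputFormat
    from docling.datamodel.pipeline_options import PdfPipelineOptions
    from docling.document_converter import DocumentConverter, PdfFormatOption

    pipeline_options = PdfPipelineOptions()
    pipeline_options.do_ocr = True
    # lang=["auto"] enables the OSD-based script detection implemented above.
    pipeline_options.ocr_options = TesseractCliOcrOptions(lang=["auto"])

    converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)
        }
    )
    result = converter.convert("sample.pdf")
    print(result.document.export_to_markdown())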