fix: refine conversion result (#52)
- fields `output` & `assembled` need not be optional
- introduced "synonym" `ConversionResult` for `ConvertedDocument` & deprecated the latter

Signed-off-by: Panos Vagenas <35837085+vagenas@users.noreply.github.com>
This commit is contained in: parent fe817b11d7, commit e46a66a176
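For orientation, a minimal usage sketch of the renamed API (it only restates what the README hunk below already shows; the URL is the one from the updated example):

```python
from docling.document_converter import DocumentConverter

converter = DocumentConverter()
result = converter.convert_single("https://arxiv.org/pdf/2408.09869")

# Per this commit, `result.assembled` and `result.output` are no longer
# Optional: they default to AssembledUnit() and an empty DsDocument, so
# rendering needs no None checks.
print(result.render_as_markdown())
```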
@@ -49,10 +49,10 @@ To convert invidual PDF documents, use `convert_single()`, for example:
 ```python
 from docling.document_converter import DocumentConverter
 
-source = "https://arxiv.org/pdf/2206.01062" # PDF path or URL
+source = "https://arxiv.org/pdf/2408.09869" # PDF path or URL
 converter = DocumentConverter()
-doc = converter.convert_single(source)
-print(doc.render_as_markdown()) # output: "## DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis [...]"
+result = converter.convert_single(source)
+print(result.render_as_markdown()) # output: "## Docling Technical Report[...]"
 ```
 
 ### Convert a batch of documents
@@ -118,7 +118,7 @@ You can convert PDFs from a binary stream instead of from the filesystem as foll
 buf = BytesIO(your_binary_stream)
 docs = [DocumentStream(filename="my_doc.pdf", stream=buf)]
 conv_input = DocumentConversionInput.from_streams(docs)
-converted_docs = doc_converter.convert(conv_input)
+results = doc_converter.convert(conv_input)
 ```
 
 ### Limit resource usage
@@ -247,9 +247,9 @@ PageElement = Union[TextElement, TableElement, FigureElement]
 
 
 class AssembledUnit(BaseModel):
-    elements: List[PageElement]
-    body: List[PageElement]
-    headers: List[PageElement]
+    elements: List[PageElement] = []
+    body: List[PageElement] = []
+    headers: List[PageElement] = []
 
 
 class Page(BaseModel):
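A side note on the new `= []` defaults: pydantic copies field defaults per instance, so the classic shared-mutable-default pitfall of plain Python classes does not apply here. A minimal sketch with a hypothetical stand-in model:

```python
from typing import List

from pydantic import BaseModel


class Unit(BaseModel):  # hypothetical stand-in for AssembledUnit
    elements: List[int] = []


a, b = Unit(), Unit()
a.elements.append(1)
print(a.elements, b.elements)  # [1] [] -- each instance gets its own copy
```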
@@ -12,6 +12,7 @@ from docling_core.types import PageDimensions, PageReference, Prov, Ref
 from docling_core.types import Table as DsSchemaTable
 from docling_core.types import TableCell
 from pydantic import BaseModel
+from typing_extensions import deprecated
 
 from docling.backend.abstract_backend import PdfDocumentBackend
 from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
@@ -49,6 +50,15 @@ layout_label_to_ds_type = {
     "Text": "paragraph",
 }
 
+_EMPTY_DOC = DsDocument(
+    _name="",
+    description=DsDocumentDescription(logs=[]),
+    file_info=DsFileInfoObject(
+        filename="",
+        document_hash="",
+    ),
+)
+
 
 class InputDocument(BaseModel):
     file: PurePath = None
@@ -115,6 +125,7 @@ class InputDocument(BaseModel):
             # raise
 
 
+@deprecated("Use `ConversionResult` instead.")
 class ConvertedDocument(BaseModel):
     input: InputDocument
 
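The decorator added above is `typing_extensions.deprecated` (PEP 702): it marks the class for static type checkers and, in recent `typing_extensions` releases, also emits a `DeprecationWarning` when the deprecated class is instantiated directly. A minimal sketch with hypothetical class names:

```python
import warnings

from typing_extensions import deprecated


@deprecated("Use `New` instead.")
class Old:  # hypothetical stand-in for ConvertedDocument
    pass


class New(Old):  # the "synonym" subclass, as in this commit
    pass


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    Old()  # direct use of the old name triggers the warning

print([str(w.message) for w in caught])  # ['Use `New` instead.']
```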
@@ -122,11 +133,11 @@ class ConvertedDocument(BaseModel):
     errors: List[ErrorItem] = []  # structure to keep errors
 
     pages: List[Page] = []
-    assembled: Optional[AssembledUnit] = None
+    assembled: AssembledUnit = AssembledUnit()
 
-    output: Optional[DsDocument] = None
+    output: DsDocument = _EMPTY_DOC
 
-    def to_ds_document(self) -> DsDocument:
+    def _to_ds_document(self) -> DsDocument:
         title = ""
         desc = DsDocumentDescription(logs=[])
 
@@ -297,16 +308,10 @@ class ConvertedDocument(BaseModel):
         return ds_doc
 
     def render_as_dict(self):
-        if self.output:
-            return self.output.model_dump(by_alias=True, exclude_none=True)
-        else:
-            return {}
+        return self.output.model_dump(by_alias=True, exclude_none=True)
 
     def render_as_markdown(self):
-        if self.output:
-            return self.output.export_to_markdown()
-        else:
-            return ""
+        return self.output.export_to_markdown()
 
     def render_element_images(
         self, element_types: Tuple[PageElement] = (FigureElement,)
@@ -323,6 +328,10 @@ class ConvertedDocument(BaseModel):
             yield element, cropped_im
 
 
+class ConversionResult(ConvertedDocument):
+    pass
+
+
 class DocumentConversionInput(BaseModel):
 
     _path_or_stream_iterator: Iterable[Union[Path, DocumentStream]] = None
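Defining the new name as an empty subclass, rather than a plain `ConversionResult = ConvertedDocument` alias, gives it an identity separate from the `@deprecated`-marked class (an alias would be the deprecated class object itself), while instances still satisfy code typed against the old name. A sketch with hypothetical minimal classes:

```python
class ConvertedDocument:  # imagine this carries @deprecated(...)
    pass


class ConversionResult(ConvertedDocument):  # fresh, non-deprecated name
    pass


res = ConversionResult()
assert isinstance(res, ConvertedDocument)  # old-name checks keep passing
```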
@@ -7,7 +7,6 @@ from pathlib import Path
 from typing import Iterable, Optional, Type, Union
 
 import requests
-from docling_core.types import Document
 from PIL import ImageDraw
 from pydantic import AnyHttpUrl, TypeAdapter, ValidationError
 
@@ -22,7 +21,7 @@ from docling.datamodel.base_models import (
     PipelineOptions,
 )
 from docling.datamodel.document import (
-    ConvertedDocument,
+    ConversionResult,
     DocumentConversionInput,
     InputDocument,
 )
@@ -73,7 +72,7 @@ class DocumentConverter:
 
         return Path(download_path)
 
-    def convert(self, input: DocumentConversionInput) -> Iterable[ConvertedDocument]:
+    def convert(self, input: DocumentConversionInput) -> Iterable[ConversionResult]:
 
         for input_batch in chunkify(
             input.docs(pdf_backend=self.pdf_backend), settings.perf.doc_batch_size
@@ -86,9 +85,9 @@ class DocumentConverter:
             #   yield from pool.map(self.process_document, input_batch)
 
             # Note: Pdfium backend is not thread-safe, thread pool usage was disabled.
-            yield from map(self.process_document, input_batch)
+            yield from map(self._process_document, input_batch)
 
-    def convert_single(self, source: Path | AnyHttpUrl | str) -> ConvertedDocument:
+    def convert_single(self, source: Path | AnyHttpUrl | str) -> ConversionResult:
         """Convert a single document.
 
         Args:
@@ -99,7 +98,7 @@ class DocumentConverter:
             RuntimeError: If conversion fails.
 
         Returns:
-            Document: The converted document object.
+            ConversionResult: The conversion result object.
         """
         with tempfile.TemporaryDirectory() as temp_dir:
             try:
@@ -129,51 +128,49 @@ class DocumentConverter:
                         f"Unexpected file path type encountered: {type(source)}"
                     )
             conv_inp = DocumentConversionInput.from_paths(paths=[local_path])
-            converted_docs_iter = self.convert(conv_inp)
-            converted_doc: ConvertedDocument = next(converted_docs_iter)
-        if converted_doc.status not in {
+            conv_res_iter = self.convert(conv_inp)
+            conv_res: ConversionResult = next(conv_res_iter)
+        if conv_res.status not in {
             ConversionStatus.SUCCESS,
             ConversionStatus.PARTIAL_SUCCESS,
         }:
-            raise RuntimeError(f"Conversion failed with status: {converted_doc.status}")
-        return converted_doc
+            raise RuntimeError(f"Conversion failed with status: {conv_res.status}")
+        return conv_res
 
-    def process_document(self, in_doc: InputDocument) -> ConvertedDocument:
+    def _process_document(self, in_doc: InputDocument) -> ConversionResult:
         start_doc_time = time.time()
-        converted_doc = ConvertedDocument(input=in_doc)
+        conv_res = ConversionResult(input=in_doc)
 
         _log.info(f"Processing document {in_doc.file.name}")
 
         if not in_doc.valid:
-            converted_doc.status = ConversionStatus.FAILURE
-            return converted_doc
+            conv_res.status = ConversionStatus.FAILURE
+            return conv_res
 
         for i in range(0, in_doc.page_count):
-            converted_doc.pages.append(Page(page_no=i))
+            conv_res.pages.append(Page(page_no=i))
 
         all_assembled_pages = []
 
         try:
             # Iterate batches of pages (page_batch_size) in the doc
-            for page_batch in chunkify(
-                converted_doc.pages, settings.perf.page_batch_size
-            ):
+            for page_batch in chunkify(conv_res.pages, settings.perf.page_batch_size):
                 start_pb_time = time.time()
                 # Pipeline
 
                 # 1. Initialise the page resources
                 init_pages = map(
-                    functools.partial(self.initialize_page, in_doc), page_batch
+                    functools.partial(self._initialize_page, in_doc), page_batch
                 )
 
                 # 2. Populate page image
                 pages_with_images = map(
-                    functools.partial(self.populate_page_images, in_doc), init_pages
+                    functools.partial(self._populate_page_images, in_doc), init_pages
                 )
 
                 # 3. Populate programmatic page cells
                 pages_with_cells = map(
-                    functools.partial(self.parse_page_cells, in_doc),
+                    functools.partial(self._parse_page_cells, in_doc),
                     pages_with_images,
                 )
 
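The renamed steps above form a lazily evaluated pipeline: each stage is a `map` over a `functools.partial`-bound method, so no page is processed until the chain is consumed downstream. A stripped-down sketch of the pattern with hypothetical step functions:

```python
import functools


def init(doc, page):  # stand-in for _initialize_page
    return page + 1


def add_image(doc, page):  # stand-in for _populate_page_images
    return page * 2


doc, pages = object(), [0, 1, 2]

init_pages = map(functools.partial(init, doc), pages)
pages_with_images = map(functools.partial(add_image, doc), init_pages)

# Nothing has run yet; evaluation happens only on consumption:
print(list(pages_with_images))  # [2, 4, 6]
```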
@@ -202,13 +199,13 @@ class DocumentConverter:
             # Free up mem resources of PDF backend
             in_doc._backend.unload()
 
-            converted_doc.pages = all_assembled_pages
-            self.assemble_doc(converted_doc)
+            conv_res.pages = all_assembled_pages
+            self._assemble_doc(conv_res)
 
             status = ConversionStatus.SUCCESS
-            for page in converted_doc.pages:
+            for page in conv_res.pages:
                 if not page._backend.is_valid():
-                    converted_doc.errors.append(
+                    conv_res.errors.append(
                         ErrorItem(
                             component_type=DoclingComponentType.PDF_BACKEND,
                             module_name=type(page._backend).__name__,
@@ -217,10 +214,10 @@ class DocumentConverter:
                     )
                     status = ConversionStatus.PARTIAL_SUCCESS
 
-            converted_doc.status = status
+            conv_res.status = status
 
         except Exception as e:
-            converted_doc.status = ConversionStatus.FAILURE
+            conv_res.status = ConversionStatus.FAILURE
             trace = "\n".join(traceback.format_exception(e))
             _log.info(
                 f"Encountered an error during conversion of document {in_doc.document_hash}:\n"
@@ -232,10 +229,10 @@ class DocumentConverter:
             f"Finished converting document time-pages={end_doc_time:.2f}/{in_doc.page_count}"
         )
 
-        return converted_doc
+        return conv_res
 
     # Initialise and load resources for a page, before downstream steps (populate images, cells, ...)
-    def initialize_page(self, doc: InputDocument, page: Page) -> Page:
+    def _initialize_page(self, doc: InputDocument, page: Page) -> Page:
         page._backend = doc._backend.load_page(page.page_no)
         page.size = page._backend.get_size()
         page.page_hash = create_hash(doc.document_hash + ":" + str(page.page_no))
@@ -243,7 +240,7 @@ class DocumentConverter:
         return page
 
     # Generate the page image and store it in the page object
-    def populate_page_images(self, doc: InputDocument, page: Page) -> Page:
+    def _populate_page_images(self, doc: InputDocument, page: Page) -> Page:
         # default scale
         page.get_image(
             scale=1.0
@@ -259,7 +256,7 @@ class DocumentConverter:
         return page
 
     # Extract and populate the page cells and store it in the page object
-    def parse_page_cells(self, doc: InputDocument, page: Page) -> Page:
+    def _parse_page_cells(self, doc: InputDocument, page: Page) -> Page:
         page.cells = page._backend.get_text_cells()
 
         # DEBUG code:
@@ -274,12 +271,12 @@ class DocumentConverter:
 
         return page
 
-    def assemble_doc(self, converted_doc: ConvertedDocument):
+    def _assemble_doc(self, conv_res: ConversionResult):
         all_elements = []
         all_headers = []
         all_body = []
 
-        for p in converted_doc.pages:
+        for p in conv_res.pages:
 
             for el in p.assembled.body:
                 all_body.append(el)
@@ -288,8 +285,8 @@ class DocumentConverter:
             for el in p.assembled.elements:
                 all_elements.append(el)
 
-        converted_doc.assembled = AssembledUnit(
+        conv_res.assembled = AssembledUnit(
             elements=all_elements, headers=all_headers, body=all_body
         )
 
-        converted_doc.output = self.glm_model(converted_doc)
+        conv_res.output = self.glm_model(conv_res)
@@ -10,7 +10,7 @@ from docling_core.types import Ref
 from PIL import ImageDraw
 
 from docling.datamodel.base_models import BoundingBox, Cluster, CoordOrigin
-from docling.datamodel.document import ConvertedDocument
+from docling.datamodel.document import ConversionResult
 
 
 class GlmModel:
@@ -20,8 +20,8 @@ class GlmModel:
         model = init_nlp_model(model_names="language;term;reference")
         self.model = model
 
-    def __call__(self, document: ConvertedDocument) -> DsDocument:
-        ds_doc = document.to_ds_document()
+    def __call__(self, conv_res: ConversionResult) -> DsDocument:
+        ds_doc = conv_res._to_ds_document()
         ds_doc_dict = ds_doc.model_dump(by_alias=True)
 
         glm_doc = self.model.apply_on_doc(ds_doc_dict)
@@ -34,7 +34,7 @@ class GlmModel:
         # DEBUG code:
         def draw_clusters_and_cells(ds_document, page_no):
             clusters_to_draw = []
-            image = copy.deepcopy(document.pages[page_no].image)
+            image = copy.deepcopy(conv_res.pages[page_no].image)
             for ix, elem in enumerate(ds_document.main_text):
                 if isinstance(elem, BaseText):
                     prov = elem.prov[0]
@@ -56,7 +56,7 @@ class GlmModel:
                         bbox=BoundingBox.from_tuple(
                             coord=prov.bbox,
                             origin=CoordOrigin.BOTTOMLEFT,
-                        ).to_top_left_origin(document.pages[page_no].size.height),
+                        ).to_top_left_origin(conv_res.pages[page_no].size.height),
                     )
                 )
 
@@ -5,14 +5,14 @@ from pathlib import Path
 from typing import Iterable
 
 from docling.datamodel.base_models import ConversionStatus, PipelineOptions
-from docling.datamodel.document import ConvertedDocument, DocumentConversionInput
+from docling.datamodel.document import ConversionResult, DocumentConversionInput
 from docling.document_converter import DocumentConverter
 
 _log = logging.getLogger(__name__)
 
 
 def export_documents(
-    converted_docs: Iterable[ConvertedDocument],
+    conv_results: Iterable[ConversionResult],
     output_dir: Path,
 ):
     output_dir.mkdir(parents=True, exist_ok=True)
@@ -21,27 +21,27 @@ def export_documents(
     failure_count = 0
     partial_success_count = 0
 
-    for doc in converted_docs:
-        if doc.status == ConversionStatus.SUCCESS:
+    for conv_res in conv_results:
+        if conv_res.status == ConversionStatus.SUCCESS:
             success_count += 1
-            doc_filename = doc.input.file.stem
+            doc_filename = conv_res.input.file.stem
 
             # Export Deep Search document JSON format:
             with (output_dir / f"{doc_filename}.json").open("w") as fp:
-                fp.write(json.dumps(doc.render_as_dict()))
+                fp.write(json.dumps(conv_res.render_as_dict()))
 
             # Export Markdown format:
             with (output_dir / f"{doc_filename}.md").open("w") as fp:
-                fp.write(doc.render_as_markdown())
-        elif doc.status == ConversionStatus.PARTIAL_SUCCESS:
+                fp.write(conv_res.render_as_markdown())
+        elif conv_res.status == ConversionStatus.PARTIAL_SUCCESS:
             _log.info(
-                f"Document {doc.input.file} was partially converted with the following errors:"
+                f"Document {conv_res.input.file} was partially converted with the following errors:"
             )
-            for item in doc.errors:
+            for item in conv_res.errors:
                 _log.info(f"\t{item.error_message}")
             partial_success_count += 1
         else:
-            _log.info(f"Document {doc.input.file} failed to convert.")
+            _log.info(f"Document {conv_res.input.file} failed to convert.")
             failure_count += 1
 
     _log.info(
@@ -72,8 +72,8 @@ def main():
 
     start_time = time.time()
 
-    converted_docs = doc_converter.convert(input)
-    export_documents(converted_docs, output_dir=Path("./scratch"))
+    conv_results = doc_converter.convert(input)
+    export_documents(conv_results, output_dir=Path("./scratch"))
 
     end_time = time.time() - start_time
 
@@ -7,14 +7,14 @@ from typing import Iterable
 from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
 from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
 from docling.datamodel.base_models import ConversionStatus, PipelineOptions
-from docling.datamodel.document import ConvertedDocument, DocumentConversionInput
+from docling.datamodel.document import ConversionResult, DocumentConversionInput
 from docling.document_converter import DocumentConverter
 
 _log = logging.getLogger(__name__)
 
 
 def export_documents(
-    converted_docs: Iterable[ConvertedDocument],
+    conv_results: Iterable[ConversionResult],
     output_dir: Path,
 ):
     output_dir.mkdir(parents=True, exist_ok=True)
@@ -22,20 +22,20 @@ def export_documents(
     success_count = 0
     failure_count = 0
 
-    for doc in converted_docs:
-        if doc.status == ConversionStatus.SUCCESS:
+    for conv_res in conv_results:
+        if conv_res.status == ConversionStatus.SUCCESS:
             success_count += 1
-            doc_filename = doc.input.file.stem
+            doc_filename = conv_res.input.file.stem
 
             # Export Deep Search document JSON format:
             with (output_dir / f"{doc_filename}.json").open("w") as fp:
-                fp.write(json.dumps(doc.render_as_dict()))
+                fp.write(json.dumps(conv_res.render_as_dict()))
 
             # Export Markdown format:
             with (output_dir / f"{doc_filename}.md").open("w") as fp:
-                fp.write(doc.render_as_markdown())
+                fp.write(conv_res.render_as_markdown())
         else:
-            _log.info(f"Document {doc.input.file} failed to convert.")
+            _log.info(f"Document {conv_res.input.file} failed to convert.")
             failure_count += 1
 
     _log.info(
@@ -113,8 +113,8 @@ def main():
 
     start_time = time.time()
 
-    converted_docs = doc_converter.convert(input)
-    export_documents(converted_docs, output_dir=Path("./scratch"))
+    conv_results = doc_converter.convert(input)
+    export_documents(conv_results, output_dir=Path("./scratch"))
 
     end_time = time.time() - start_time
 
@@ -10,7 +10,7 @@ from docling.datamodel.base_models import (
     PageElement,
     TableElement,
 )
-from docling.datamodel.document import ConvertedDocument, DocumentConversionInput
+from docling.datamodel.document import DocumentConversionInput
 from docling.document_converter import DocumentConverter
 
 _log = logging.getLogger(__name__)
@@ -39,25 +39,25 @@ def main():
 
     start_time = time.time()
 
-    converted_docs = doc_converter.convert(input_files)
+    conv_results = doc_converter.convert(input_files)
 
     output_dir.mkdir(parents=True, exist_ok=True)
-    for doc in converted_docs:
-        if doc.status != ConversionStatus.SUCCESS:
-            _log.info(f"Document {doc.input.file} failed to convert.")
+    for conv_res in conv_results:
+        if conv_res.status != ConversionStatus.SUCCESS:
+            _log.info(f"Document {conv_res.input.file} failed to convert.")
             continue
 
-        doc_filename = doc.input.file.stem
+        doc_filename = conv_res.input.file.stem
 
         # Export page images
-        for page in doc.pages:
+        for page in conv_res.pages:
             page_no = page.page_no + 1
             page_image_filename = output_dir / f"{doc_filename}-{page_no}.png"
             with page_image_filename.open("wb") as fp:
                 page.image.save(fp, format="PNG")
 
         # Export figures and tables
-        for element, image in doc.render_element_images(
+        for element, image in conv_res.render_element_images(
             element_types=(FigureElement, TableElement)
         ):
             element_image_filename = (