fix: Revise DocTags, fix iterate_items to output content_layer in items (#965)
* Testing fix for docling-core dt
  Signed-off-by: Maksym Lysak <mly@zurich.ibm.com>
* fix: Fix code_formula test unit, update test-cases
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* fix: Fix code-formula model for new docling-core
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* fix: Update fixes
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* Update test cases for office formats
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* Update deps and lockfile
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* Clean up imports
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>

---------

Signed-off-by: Maksym Lysak <mly@zurich.ibm.com>
Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
Co-authored-by: Maksym Lysak <mly@zurich.ibm.com>
Co-authored-by: Christoph Auer <cau@zurich.ibm.com>
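Since the headline change is that `iterate_items` now exposes the content layer on the items it yields, here is a minimal sketch of how this surfaces to users (assuming docling-core >= 2.19; the `ContentLayer` import path and the `content_layer` attribute name should be verified against the installed version, and `example.pdf` is a placeholder input):

```python
from docling.document_converter import DocumentConverter
from docling_core.types.doc import ContentLayer  # assumed re-export location

# Convert a placeholder input; the resulting DoclingDocument is what
# iterate_items() walks over.
doc = DocumentConverter().convert("example.pdf").document

# After this fix, items yielded by iterate_items() should carry their
# content layer (e.g. BODY vs. FURNITURE for page headers/footers).
for item, level in doc.iterate_items(with_groups=False):
    layer = getattr(item, "content_layer", None)
    if layer == ContentLayer.FURNITURE:
        continue  # skip page furniture such as headers and footers
    print(level, item.label, layer)
```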
parent 77eb77bdc2
commit 6e75f0b5d3

245  poetry.lock  (generated)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
 
 [[package]]
 name = "aiohappyeyeballs"
@@ -866,13 +866,13 @@ files = [
 
 [[package]]
 name = "docling-core"
-version = "2.18.1"
+version = "2.19.0"
 description = "A python library to define and validate data types in Docling."
 optional = false
 python-versions = "<4.0,>=3.9"
 files = [
-    {file = "docling_core-2.18.1-py3-none-any.whl", hash = "sha256:6083ca0bed7534cd923dd06d348960cfe8ff598ba19c22a81b799747e698204a"},
-    {file = "docling_core-2.18.1.tar.gz", hash = "sha256:c2cc3ff1aefa4a79a0d0c39046a9d158de48ac7d0bba225636dde0d42ef7df6c"},
+    {file = "docling_core-2.19.0-py3-none-any.whl", hash = "sha256:caa1e13d98fa9a00608091c386609c75b3560c7291e842c252f0b6f8d5812dbd"},
+    {file = "docling_core-2.19.0.tar.gz", hash = "sha256:ebf3062e31155bb5f0e6132056a2d239a0e6e693a75c5758886909bb9fef461a"},
 ]
 
 [package.dependencies]
@@ -3359,66 +3359,66 @@ files = [
 
 [[package]]
 name = "numpy"
-version = "2.2.2"
+version = "2.2.3"
 description = "Fundamental package for array computing in Python"
 optional = false
 python-versions = ">=3.10"
 files = [
-    {file = "numpy-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7079129b64cb78bdc8d611d1fd7e8002c0a2565da6a47c4df8062349fee90e3e"},
-    {file = "numpy-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ec6c689c61df613b783aeb21f945c4cbe6c51c28cb70aae8430577ab39f163e"},
-    {file = "numpy-2.2.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:40c7ff5da22cd391944a28c6a9c638a5eef77fcf71d6e3a79e1d9d9e82752715"},
-    {file = "numpy-2.2.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:995f9e8181723852ca458e22de5d9b7d3ba4da3f11cc1cb113f093b271d7965a"},
-    {file = "numpy-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78ea78450fd96a498f50ee096f69c75379af5138f7881a51355ab0e11286c97"},
-    {file = "numpy-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fbe72d347fbc59f94124125e73fc4976a06927ebc503ec5afbfb35f193cd957"},
-    {file = "numpy-2.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8e6da5cffbbe571f93588f562ed130ea63ee206d12851b60819512dd3e1ba50d"},
-    {file = "numpy-2.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09d6a2032faf25e8d0cadde7fd6145118ac55d2740132c1d845f98721b5ebcfd"},
-    {file = "numpy-2.2.2-cp310-cp310-win32.whl", hash = "sha256:159ff6ee4c4a36a23fe01b7c3d07bd8c14cc433d9720f977fcd52c13c0098160"},
-    {file = "numpy-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:64bd6e1762cd7f0986a740fee4dff927b9ec2c5e4d9a28d056eb17d332158014"},
-    {file = "numpy-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:642199e98af1bd2b6aeb8ecf726972d238c9877b0f6e8221ee5ab945ec8a2189"},
-    {file = "numpy-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6d9fc9d812c81e6168b6d405bf00b8d6739a7f72ef22a9214c4241e0dc70b323"},
-    {file = "numpy-2.2.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c7d1fd447e33ee20c1f33f2c8e6634211124a9aabde3c617687d8b739aa69eac"},
-    {file = "numpy-2.2.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:451e854cfae0febe723077bd0cf0a4302a5d84ff25f0bfece8f29206c7bed02e"},
-    {file = "numpy-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd249bc894af67cbd8bad2c22e7cbcd46cf87ddfca1f1289d1e7e54868cc785c"},
-    {file = "numpy-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02935e2c3c0c6cbe9c7955a8efa8908dd4221d7755644c59d1bba28b94fd334f"},
-    {file = "numpy-2.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a972cec723e0563aa0823ee2ab1df0cb196ed0778f173b381c871a03719d4826"},
-    {file = "numpy-2.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d6d6a0910c3b4368d89dde073e630882cdb266755565155bc33520283b2d9df8"},
-    {file = "numpy-2.2.2-cp311-cp311-win32.whl", hash = "sha256:860fd59990c37c3ef913c3ae390b3929d005243acca1a86facb0773e2d8d9e50"},
-    {file = "numpy-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:da1eeb460ecce8d5b8608826595c777728cdf28ce7b5a5a8c8ac8d949beadcf2"},
-    {file = "numpy-2.2.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ac9bea18d6d58a995fac1b2cb4488e17eceeac413af014b1dd26170b766d8467"},
-    {file = "numpy-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23ae9f0c2d889b7b2d88a3791f6c09e2ef827c2446f1c4a3e3e76328ee4afd9a"},
-    {file = "numpy-2.2.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3074634ea4d6df66be04f6728ee1d173cfded75d002c75fac79503a880bf3825"},
-    {file = "numpy-2.2.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ec0636d3f7d68520afc6ac2dc4b8341ddb725039de042faf0e311599f54eb37"},
-    {file = "numpy-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ffbb1acd69fdf8e89dd60ef6182ca90a743620957afb7066385a7bbe88dc748"},
-    {file = "numpy-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0349b025e15ea9d05c3d63f9657707a4e1d471128a3b1d876c095f328f8ff7f0"},
-    {file = "numpy-2.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:463247edcee4a5537841d5350bc87fe8e92d7dd0e8c71c995d2c6eecb8208278"},
-    {file = "numpy-2.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9dd47ff0cb2a656ad69c38da850df3454da88ee9a6fde0ba79acceee0e79daba"},
-    {file = "numpy-2.2.2-cp312-cp312-win32.whl", hash = "sha256:4525b88c11906d5ab1b0ec1f290996c0020dd318af8b49acaa46f198b1ffc283"},
-    {file = "numpy-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:5acea83b801e98541619af398cc0109ff48016955cc0818f478ee9ef1c5c3dcb"},
-    {file = "numpy-2.2.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b208cfd4f5fe34e1535c08983a1a6803fdbc7a1e86cf13dd0c61de0b51a0aadc"},
-    {file = "numpy-2.2.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0bbe7dd86dca64854f4b6ce2ea5c60b51e36dfd597300057cf473d3615f2369"},
-    {file = "numpy-2.2.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:22ea3bb552ade325530e72a0c557cdf2dea8914d3a5e1fecf58fa5dbcc6f43cd"},
-    {file = "numpy-2.2.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:128c41c085cab8a85dc29e66ed88c05613dccf6bc28b3866cd16050a2f5448be"},
-    {file = "numpy-2.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:250c16b277e3b809ac20d1f590716597481061b514223c7badb7a0f9993c7f84"},
-    {file = "numpy-2.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c8854b09bc4de7b041148d8550d3bd712b5c21ff6a8ed308085f190235d7ff"},
-    {file = "numpy-2.2.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b6fb9c32a91ec32a689ec6410def76443e3c750e7cfc3fb2206b985ffb2b85f0"},
-    {file = "numpy-2.2.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:57b4012e04cc12b78590a334907e01b3a85efb2107df2b8733ff1ed05fce71de"},
-    {file = "numpy-2.2.2-cp313-cp313-win32.whl", hash = "sha256:4dbd80e453bd34bd003b16bd802fac70ad76bd463f81f0c518d1245b1c55e3d9"},
-    {file = "numpy-2.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:5a8c863ceacae696aff37d1fd636121f1a512117652e5dfb86031c8d84836369"},
-    {file = "numpy-2.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b3482cb7b3325faa5f6bc179649406058253d91ceda359c104dac0ad320e1391"},
-    {file = "numpy-2.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9491100aba630910489c1d0158034e1c9a6546f0b1340f716d522dc103788e39"},
-    {file = "numpy-2.2.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:41184c416143defa34cc8eb9d070b0a5ba4f13a0fa96a709e20584638254b317"},
-    {file = "numpy-2.2.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:7dca87ca328f5ea7dafc907c5ec100d187911f94825f8700caac0b3f4c384b49"},
-    {file = "numpy-2.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bc61b307655d1a7f9f4b043628b9f2b721e80839914ede634e3d485913e1fb2"},
-    {file = "numpy-2.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fad446ad0bc886855ddf5909cbf8cb5d0faa637aaa6277fb4b19ade134ab3c7"},
-    {file = "numpy-2.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:149d1113ac15005652e8d0d3f6fd599360e1a708a4f98e43c9c77834a28238cb"},
-    {file = "numpy-2.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:106397dbbb1896f99e044efc90360d098b3335060375c26aa89c0d8a97c5f648"},
-    {file = "numpy-2.2.2-cp313-cp313t-win32.whl", hash = "sha256:0eec19f8af947a61e968d5429f0bd92fec46d92b0008d0a6685b40d6adf8a4f4"},
-    {file = "numpy-2.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:97b974d3ba0fb4612b77ed35d7627490e8e3dff56ab41454d9e8b23448940576"},
-    {file = "numpy-2.2.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b0531f0b0e07643eb089df4c509d30d72c9ef40defa53e41363eca8a8cc61495"},
-    {file = "numpy-2.2.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:e9e82dcb3f2ebbc8cb5ce1102d5f1c5ed236bf8a11730fb45ba82e2841ec21df"},
-    {file = "numpy-2.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d4142eb40ca6f94539e4db929410f2a46052a0fe7a2c1c59f6179c39938d2a"},
-    {file = "numpy-2.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:356ca982c188acbfa6af0d694284d8cf20e95b1c3d0aefa8929376fea9146f60"},
-    {file = "numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f"},
+    {file = "numpy-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cbc6472e01952d3d1b2772b720428f8b90e2deea8344e854df22b0618e9cce71"},
+    {file = "numpy-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdfe0c22692a30cd830c0755746473ae66c4a8f2e7bd508b35fb3b6a0813d787"},
+    {file = "numpy-2.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:e37242f5324ffd9f7ba5acf96d774f9276aa62a966c0bad8dae692deebec7716"},
+    {file = "numpy-2.2.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95172a21038c9b423e68be78fd0be6e1b97674cde269b76fe269a5dfa6fadf0b"},
+    {file = "numpy-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5b47c440210c5d1d67e1cf434124e0b5c395eee1f5806fdd89b553ed1acd0a3"},
+    {file = "numpy-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0391ea3622f5c51a2e29708877d56e3d276827ac5447d7f45e9bc4ade8923c52"},
+    {file = "numpy-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f6b3dfc7661f8842babd8ea07e9897fe3d9b69a1d7e5fbb743e4160f9387833b"},
+    {file = "numpy-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1ad78ce7f18ce4e7df1b2ea4019b5817a2f6a8a16e34ff2775f646adce0a5027"},
+    {file = "numpy-2.2.3-cp310-cp310-win32.whl", hash = "sha256:5ebeb7ef54a7be11044c33a17b2624abe4307a75893c001a4800857956b41094"},
+    {file = "numpy-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:596140185c7fa113563c67c2e894eabe0daea18cf8e33851738c19f70ce86aeb"},
+    {file = "numpy-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:16372619ee728ed67a2a606a614f56d3eabc5b86f8b615c79d01957062826ca8"},
+    {file = "numpy-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5521a06a3148686d9269c53b09f7d399a5725c47bbb5b35747e1cb76326b714b"},
+    {file = "numpy-2.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:7c8dde0ca2f77828815fd1aedfdf52e59071a5bae30dac3b4da2a335c672149a"},
+    {file = "numpy-2.2.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:77974aba6c1bc26e3c205c2214f0d5b4305bdc719268b93e768ddb17e3fdd636"},
+    {file = "numpy-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d42f9c36d06440e34226e8bd65ff065ca0963aeecada587b937011efa02cdc9d"},
+    {file = "numpy-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2712c5179f40af9ddc8f6727f2bd910ea0eb50206daea75f58ddd9fa3f715bb"},
+    {file = "numpy-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c8b0451d2ec95010d1db8ca733afc41f659f425b7f608af569711097fd6014e2"},
+    {file = "numpy-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9b4a8148c57ecac25a16b0e11798cbe88edf5237b0df99973687dd866f05e1b"},
+    {file = "numpy-2.2.3-cp311-cp311-win32.whl", hash = "sha256:1f45315b2dc58d8a3e7754fe4e38b6fce132dab284a92851e41b2b344f6441c5"},
+    {file = "numpy-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f48ba6f6c13e5e49f3d3efb1b51c8193215c42ac82610a04624906a9270be6f"},
+    {file = "numpy-2.2.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12c045f43b1d2915eca6b880a7f4a256f59d62df4f044788c8ba67709412128d"},
+    {file = "numpy-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87eed225fd415bbae787f93a457af7f5990b92a334e346f72070bf569b9c9c95"},
+    {file = "numpy-2.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:712a64103d97c404e87d4d7c47fb0c7ff9acccc625ca2002848e0d53288b90ea"},
+    {file = "numpy-2.2.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a5ae282abe60a2db0fd407072aff4599c279bcd6e9a2475500fc35b00a57c532"},
+    {file = "numpy-2.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5266de33d4c3420973cf9ae3b98b54a2a6d53a559310e3236c4b2b06b9c07d4e"},
+    {file = "numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe"},
+    {file = "numpy-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34c1b7e83f94f3b564b35f480f5652a47007dd91f7c839f404d03279cc8dd021"},
+    {file = "numpy-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4d8335b5f1b6e2bce120d55fb17064b0262ff29b459e8493d1785c18ae2553b8"},
+    {file = "numpy-2.2.3-cp312-cp312-win32.whl", hash = "sha256:4d9828d25fb246bedd31e04c9e75714a4087211ac348cb39c8c5f99dbb6683fe"},
+    {file = "numpy-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d"},
+    {file = "numpy-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bfdb06b395385ea9b91bf55c1adf1b297c9fdb531552845ff1d3ea6e40d5aba"},
+    {file = "numpy-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:23c9f4edbf4c065fddb10a4f6e8b6a244342d95966a48820c614891e5059bb50"},
+    {file = "numpy-2.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:a0c03b6be48aaf92525cccf393265e02773be8fd9551a2f9adbe7db1fa2b60f1"},
+    {file = "numpy-2.2.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:2376e317111daa0a6739e50f7ee2a6353f768489102308b0d98fcf4a04f7f3b5"},
+    {file = "numpy-2.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fb62fe3d206d72fe1cfe31c4a1106ad2b136fcc1606093aeab314f02930fdf2"},
+    {file = "numpy-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52659ad2534427dffcc36aac76bebdd02b67e3b7a619ac67543bc9bfe6b7cdb1"},
+    {file = "numpy-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b416af7d0ed3271cad0f0a0d0bee0911ed7eba23e66f8424d9f3dfcdcae1304"},
+    {file = "numpy-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1402da8e0f435991983d0a9708b779f95a8c98c6b18a171b9f1be09005e64d9d"},
+    {file = "numpy-2.2.3-cp313-cp313-win32.whl", hash = "sha256:136553f123ee2951bfcfbc264acd34a2fc2f29d7cdf610ce7daf672b6fbaa693"},
+    {file = "numpy-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5b732c8beef1d7bc2d9e476dbba20aaff6167bf205ad9aa8d30913859e82884b"},
+    {file = "numpy-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:435e7a933b9fda8126130b046975a968cc2d833b505475e588339e09f7672890"},
+    {file = "numpy-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7678556eeb0152cbd1522b684dcd215250885993dd00adb93679ec3c0e6e091c"},
+    {file = "numpy-2.2.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2e8da03bd561504d9b20e7a12340870dfc206c64ea59b4cfee9fceb95070ee94"},
+    {file = "numpy-2.2.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:c9aa4496fd0e17e3843399f533d62857cef5900facf93e735ef65aa4bbc90ef0"},
+    {file = "numpy-2.2.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4ca91d61a4bf61b0f2228f24bbfa6a9facd5f8af03759fe2a655c50ae2c6610"},
+    {file = "numpy-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deaa09cd492e24fd9b15296844c0ad1b3c976da7907e1c1ed3a0ad21dded6f76"},
+    {file = "numpy-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:246535e2f7496b7ac85deffe932896a3577be7af8fb7eebe7146444680297e9a"},
+    {file = "numpy-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:daf43a3d1ea699402c5a850e5313680ac355b4adc9770cd5cfc2940e7861f1bf"},
+    {file = "numpy-2.2.3-cp313-cp313t-win32.whl", hash = "sha256:cf802eef1f0134afb81fef94020351be4fe1d6681aadf9c5e862af6602af64ef"},
+    {file = "numpy-2.2.3-cp313-cp313t-win_amd64.whl", hash = "sha256:aee2512827ceb6d7f517c8b85aa5d3923afe8fc7a57d028cffcd522f1c6fd082"},
+    {file = "numpy-2.2.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3c2ec8a0f51d60f1e9c0c5ab116b7fc104b165ada3f6c58abf881cb2eb16044d"},
+    {file = "numpy-2.2.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:ed2cf9ed4e8ebc3b754d398cba12f24359f018b416c380f577bbae112ca52fc9"},
+    {file = "numpy-2.2.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39261798d208c3095ae4f7bc8eaeb3481ea8c6e03dc48028057d3cbdbdb8937e"},
+    {file = "numpy-2.2.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:783145835458e60fa97afac25d511d00a1eca94d4a8f3ace9fe2043003c678e4"},
+    {file = "numpy-2.2.3.tar.gz", hash = "sha256:dbdc15f0c81611925f382dfa97b3bd0bc2c1ce19d4fe50482cb0ddc12ba30020"},
 ]
 
 [[package]]
@@ -4436,32 +4436,25 @@ files = [
 
 [[package]]
 name = "psutil"
-version = "6.1.1"
-description = "Cross-platform lib for process and system monitoring in Python."
+version = "7.0.0"
+description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7."
 optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+python-versions = ">=3.6"
 files = [
-    {file = "psutil-6.1.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9ccc4316f24409159897799b83004cb1e24f9819b0dcf9c0b68bdcb6cefee6a8"},
-    {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ca9609c77ea3b8481ab005da74ed894035936223422dc591d6772b147421f777"},
-    {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8df0178ba8a9e5bc84fed9cfa61d54601b371fbec5c8eebad27575f1e105c0d4"},
-    {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:1924e659d6c19c647e763e78670a05dbb7feaf44a0e9c94bf9e14dfc6ba50468"},
-    {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:018aeae2af92d943fdf1da6b58665124897cfc94faa2ca92098838f83e1b1bca"},
-    {file = "psutil-6.1.1-cp27-none-win32.whl", hash = "sha256:6d4281f5bbca041e2292be3380ec56a9413b790579b8e593b1784499d0005dac"},
-    {file = "psutil-6.1.1-cp27-none-win_amd64.whl", hash = "sha256:c777eb75bb33c47377c9af68f30e9f11bc78e0f07fbf907be4a5d70b2fe5f030"},
-    {file = "psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8"},
-    {file = "psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377"},
-    {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003"},
-    {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160"},
-    {file = "psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3"},
-    {file = "psutil-6.1.1-cp36-cp36m-win32.whl", hash = "sha256:384636b1a64b47814437d1173be1427a7c83681b17a450bfc309a1953e329603"},
-    {file = "psutil-6.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8be07491f6ebe1a693f17d4f11e69d0dc1811fa082736500f649f79df7735303"},
-    {file = "psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53"},
-    {file = "psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649"},
-    {file = "psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5"},
+    {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"},
+    {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"},
+    {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"},
+    {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"},
+    {file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"},
+    {file = "psutil-7.0.0-cp36-cp36m-win32.whl", hash = "sha256:84df4eb63e16849689f76b1ffcb36db7b8de703d1bc1fe41773db487621b6c17"},
+    {file = "psutil-7.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e744154a6580bc968a0195fd25e80432d3afec619daf145b9e5ba16cc1d688e"},
+    {file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"},
+    {file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"},
+    {file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"},
 ]
 
 [package.extras]
-dev = ["abi3audit", "black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"]
+dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"]
 test = ["pytest", "pytest-xdist", "setuptools"]
 
 [[package]]
@@ -6097,51 +6090,57 @@ test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "po
 
 [[package]]
 name = "scipy"
-version = "1.15.1"
+version = "1.15.2"
 description = "Fundamental algorithms for scientific computing in Python"
 optional = false
 python-versions = ">=3.10"
 files = [
-    {file = "scipy-1.15.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:c64ded12dcab08afff9e805a67ff4480f5e69993310e093434b10e85dc9d43e1"},
-    {file = "scipy-1.15.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5b190b935e7db569960b48840e5bef71dc513314cc4e79a1b7d14664f57fd4ff"},
-    {file = "scipy-1.15.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:4b17d4220df99bacb63065c76b0d1126d82bbf00167d1730019d2a30d6ae01ea"},
-    {file = "scipy-1.15.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:63b9b6cd0333d0eb1a49de6f834e8aeaefe438df8f6372352084535ad095219e"},
-    {file = "scipy-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f151e9fb60fbf8e52426132f473221a49362091ce7a5e72f8aa41f8e0da4f25"},
-    {file = "scipy-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21e10b1dd56ce92fba3e786007322542361984f8463c6d37f6f25935a5a6ef52"},
-    {file = "scipy-1.15.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5dff14e75cdbcf07cdaa1c7707db6017d130f0af9ac41f6ce443a93318d6c6e0"},
-    {file = "scipy-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:f82fcf4e5b377f819542fbc8541f7b5fbcf1c0017d0df0bc22c781bf60abc4d8"},
-    {file = "scipy-1.15.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:5bd8d27d44e2c13d0c1124e6a556454f52cd3f704742985f6b09e75e163d20d2"},
-    {file = "scipy-1.15.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:be3deeb32844c27599347faa077b359584ba96664c5c79d71a354b80a0ad0ce0"},
-    {file = "scipy-1.15.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:5eb0ca35d4b08e95da99a9f9c400dc9f6c21c424298a0ba876fdc69c7afacedf"},
-    {file = "scipy-1.15.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:74bb864ff7640dea310a1377d8567dc2cb7599c26a79ca852fc184cc851954ac"},
-    {file = "scipy-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:667f950bf8b7c3a23b4199db24cb9bf7512e27e86d0e3813f015b74ec2c6e3df"},
-    {file = "scipy-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395be70220d1189756068b3173853029a013d8c8dd5fd3d1361d505b2aa58fa7"},
-    {file = "scipy-1.15.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ce3a000cd28b4430426db2ca44d96636f701ed12e2b3ca1f2b1dd7abdd84b39a"},
-    {file = "scipy-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:3fe1d95944f9cf6ba77aa28b82dd6bb2a5b52f2026beb39ecf05304b8392864b"},
-    {file = "scipy-1.15.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c09aa9d90f3500ea4c9b393ee96f96b0ccb27f2f350d09a47f533293c78ea776"},
-    {file = "scipy-1.15.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0ac102ce99934b162914b1e4a6b94ca7da0f4058b6d6fd65b0cef330c0f3346f"},
-    {file = "scipy-1.15.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:09c52320c42d7f5c7748b69e9f0389266fd4f82cf34c38485c14ee976cb8cb04"},
-    {file = "scipy-1.15.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:cdde8414154054763b42b74fe8ce89d7f3d17a7ac5dd77204f0e142cdc9239e9"},
-    {file = "scipy-1.15.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c9d8fc81d6a3b6844235e6fd175ee1d4c060163905a2becce8e74cb0d7554ce"},
-    {file = "scipy-1.15.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb57b30f0017d4afa5fe5f5b150b8f807618819287c21cbe51130de7ccdaed2"},
-    {file = "scipy-1.15.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491d57fe89927fa1aafbe260f4cfa5ffa20ab9f1435025045a5315006a91b8f5"},
-    {file = "scipy-1.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:900f3fa3db87257510f011c292a5779eb627043dd89731b9c461cd16ef76ab3d"},
-    {file = "scipy-1.15.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:100193bb72fbff37dbd0bf14322314fc7cbe08b7ff3137f11a34d06dc0ee6b85"},
-    {file = "scipy-1.15.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:2114a08daec64980e4b4cbdf5bee90935af66d750146b1d2feb0d3ac30613692"},
-    {file = "scipy-1.15.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:6b3e71893c6687fc5e29208d518900c24ea372a862854c9888368c0b267387ab"},
-    {file = "scipy-1.15.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:837299eec3d19b7e042923448d17d95a86e43941104d33f00da7e31a0f715d3c"},
-    {file = "scipy-1.15.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82add84e8a9fb12af5c2c1a3a3f1cb51849d27a580cb9e6bd66226195142be6e"},
-    {file = "scipy-1.15.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070d10654f0cb6abd295bc96c12656f948e623ec5f9a4eab0ddb1466c000716e"},
-    {file = "scipy-1.15.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55cc79ce4085c702ac31e49b1e69b27ef41111f22beafb9b49fea67142b696c4"},
-    {file = "scipy-1.15.1-cp313-cp313-win_amd64.whl", hash = "sha256:c352c1b6d7cac452534517e022f8f7b8d139cd9f27e6fbd9f3cbd0bfd39f5bef"},
-    {file = "scipy-1.15.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0458839c9f873062db69a03de9a9765ae2e694352c76a16be44f93ea45c28d2b"},
-    {file = "scipy-1.15.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:af0b61c1de46d0565b4b39c6417373304c1d4f5220004058bdad3061c9fa8a95"},
-    {file = "scipy-1.15.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:71ba9a76c2390eca6e359be81a3e879614af3a71dfdabb96d1d7ab33da6f2364"},
-    {file = "scipy-1.15.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14eaa373c89eaf553be73c3affb11ec6c37493b7eaaf31cf9ac5dffae700c2e0"},
-    {file = "scipy-1.15.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f735bc41bd1c792c96bc426dece66c8723283695f02df61dcc4d0a707a42fc54"},
-    {file = "scipy-1.15.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2722a021a7929d21168830790202a75dbb20b468a8133c74a2c0230c72626b6c"},
-    {file = "scipy-1.15.1-cp313-cp313t-win_amd64.whl", hash = "sha256:bc7136626261ac1ed988dca56cfc4ab5180f75e0ee52e58f1e6aa74b5f3eacd5"},
-    {file = "scipy-1.15.1.tar.gz", hash = "sha256:033a75ddad1463970c96a88063a1df87ccfddd526437136b6ee81ff0312ebdf6"},
+    {file = "scipy-1.15.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a2ec871edaa863e8213ea5df811cd600734f6400b4af272e1c011e69401218e9"},
+    {file = "scipy-1.15.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:6f223753c6ea76983af380787611ae1291e3ceb23917393079dcc746ba60cfb5"},
+    {file = "scipy-1.15.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:ecf797d2d798cf7c838c6d98321061eb3e72a74710e6c40540f0e8087e3b499e"},
+    {file = "scipy-1.15.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:9b18aa747da280664642997e65aab1dd19d0c3d17068a04b3fe34e2559196cb9"},
+    {file = "scipy-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87994da02e73549dfecaed9e09a4f9d58a045a053865679aeb8d6d43747d4df3"},
+    {file = "scipy-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69ea6e56d00977f355c0f84eba69877b6df084516c602d93a33812aa04d90a3d"},
+    {file = "scipy-1.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:888307125ea0c4466287191e5606a2c910963405ce9671448ff9c81c53f85f58"},
+    {file = "scipy-1.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9412f5e408b397ff5641080ed1e798623dbe1ec0d78e72c9eca8992976fa65aa"},
+    {file = "scipy-1.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:b5e025e903b4f166ea03b109bb241355b9c42c279ea694d8864d033727205e65"},
+    {file = "scipy-1.15.2-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:92233b2df6938147be6fa8824b8136f29a18f016ecde986666be5f4d686a91a4"},
+    {file = "scipy-1.15.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:62ca1ff3eb513e09ed17a5736929429189adf16d2d740f44e53270cc800ecff1"},
+    {file = "scipy-1.15.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:4c6676490ad76d1c2894d77f976144b41bd1a4052107902238047fb6a473e971"},
+    {file = "scipy-1.15.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a8bf5cb4a25046ac61d38f8d3c3426ec11ebc350246a4642f2f315fe95bda655"},
+    {file = "scipy-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a8e34cf4c188b6dd004654f88586d78f95639e48a25dfae9c5e34a6dc34547e"},
+    {file = "scipy-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28a0d2c2075946346e4408b211240764759e0fabaeb08d871639b5f3b1aca8a0"},
+    {file = "scipy-1.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:42dabaaa798e987c425ed76062794e93a243be8f0f20fff6e7a89f4d61cb3d40"},
+    {file = "scipy-1.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6f5e296ec63c5da6ba6fa0343ea73fd51b8b3e1a300b0a8cae3ed4b1122c7462"},
+    {file = "scipy-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:597a0c7008b21c035831c39927406c6181bcf8f60a73f36219b69d010aa04737"},
+    {file = "scipy-1.15.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c4697a10da8f8765bb7c83e24a470da5797e37041edfd77fd95ba3811a47c4fd"},
+    {file = "scipy-1.15.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:869269b767d5ee7ea6991ed7e22b3ca1f22de73ab9a49c44bad338b725603301"},
+    {file = "scipy-1.15.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:bad78d580270a4d32470563ea86c6590b465cb98f83d760ff5b0990cb5518a93"},
+    {file = "scipy-1.15.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:b09ae80010f52efddb15551025f9016c910296cf70adbf03ce2a8704f3a5ad20"},
+    {file = "scipy-1.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a6fd6eac1ce74a9f77a7fc724080d507c5812d61e72bd5e4c489b042455865e"},
+    {file = "scipy-1.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b871df1fe1a3ba85d90e22742b93584f8d2b8e6124f8372ab15c71b73e428b8"},
+    {file = "scipy-1.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:03205d57a28e18dfd39f0377d5002725bf1f19a46f444108c29bdb246b6c8a11"},
+    {file = "scipy-1.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:601881dfb761311045b03114c5fe718a12634e5608c3b403737ae463c9885d53"},
+    {file = "scipy-1.15.2-cp312-cp312-win_amd64.whl", hash = "sha256:e7c68b6a43259ba0aab737237876e5c2c549a031ddb7abc28c7b47f22e202ded"},
+    {file = "scipy-1.15.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01edfac9f0798ad6b46d9c4c9ca0e0ad23dbf0b1eb70e96adb9fa7f525eff0bf"},
+    {file = "scipy-1.15.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:08b57a9336b8e79b305a143c3655cc5bdbe6d5ece3378578888d2afbb51c4e37"},
+    {file = "scipy-1.15.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:54c462098484e7466362a9f1672d20888f724911a74c22ae35b61f9c5919183d"},
+    {file = "scipy-1.15.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:cf72ff559a53a6a6d77bd8eefd12a17995ffa44ad86c77a5df96f533d4e6c6bb"},
+    {file = "scipy-1.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9de9d1416b3d9e7df9923ab23cd2fe714244af10b763975bea9e4f2e81cebd27"},
+    {file = "scipy-1.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb530e4794fc8ea76a4a21ccb67dea33e5e0e60f07fc38a49e821e1eae3b71a0"},
+    {file = "scipy-1.15.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5ea7ed46d437fc52350b028b1d44e002646e28f3e8ddc714011aaf87330f2f32"},
+    {file = "scipy-1.15.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11e7ad32cf184b74380f43d3c0a706f49358b904fa7d5345f16ddf993609184d"},
+    {file = "scipy-1.15.2-cp313-cp313-win_amd64.whl", hash = "sha256:a5080a79dfb9b78b768cebf3c9dcbc7b665c5875793569f48bf0e2b1d7f68f6f"},
+    {file = "scipy-1.15.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:447ce30cee6a9d5d1379087c9e474628dab3db4a67484be1b7dc3196bfb2fac9"},
+    {file = "scipy-1.15.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:c90ebe8aaa4397eaefa8455a8182b164a6cc1d59ad53f79943f266d99f68687f"},
+    {file = "scipy-1.15.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:def751dd08243934c884a3221156d63e15234a3155cf25978b0a668409d45eb6"},
+    {file = "scipy-1.15.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:302093e7dfb120e55515936cb55618ee0b895f8bcaf18ff81eca086c17bd80af"},
+    {file = "scipy-1.15.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd5b77413e1855351cdde594eca99c1f4a588c2d63711388b6a1f1c01f62274"},
+    {file = "scipy-1.15.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d0194c37037707b2afa7a2f2a924cf7bac3dc292d51b6a925e5fcb89bc5c776"},
+    {file = "scipy-1.15.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:bae43364d600fdc3ac327db99659dcb79e6e7ecd279a75fe1266669d9a652828"},
+    {file = "scipy-1.15.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f031846580d9acccd0044efd1a90e6f4df3a6e12b4b6bd694a7bc03a89892b28"},
+    {file = "scipy-1.15.2-cp313-cp313t-win_amd64.whl", hash = "sha256:fe8a9eb875d430d81755472c5ba75e84acc980e4a8f6204d402849234d3017db"},
+    {file = "scipy-1.15.2.tar.gz", hash = "sha256:cd58a314d92838f7e6f755c8a2167ead4f27e1fd5c1251fd54289569ef3495ec"},
 ]
 
 [package.dependencies]
@@ -7842,4 +7841,4 @@ vlm = ["transformers", "transformers"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "8ffe337614eaba9d560f7fc971c8ca3ebe7cbf58bb004af3a9616371e08047ac"
+content-hash = "2cca8bac31dd535e36045cf2f5f0380852c34f6bafad78834144d6ca56d2d79c"
pyproject.toml

@@ -26,7 +26,7 @@ packages = [{include = "docling"}]
 ######################
 python = "^3.9"
 pydantic = "^2.0.0"
-docling-core = {extras = ["chunking"], version = "^2.18.0"}
+docling-core = {extras = ["chunking"], version = "^2.19.0"}
 docling-ibm-models = "^3.3.0"
 deepsearch-glm = "^1.0.0"
 docling-parse = "^3.3.0"
File diff suppressed because one or more lines are too long (10 files).
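The ground-truth diff below illustrates the revised DocTags serialization: the legacy `<document>`/`<location><page_N>` markup is replaced by the compact `<doctag>` format with inline `<loc_…>` tokens, OTSL tables and explicit page furniture (`<page_header>`, `<page_footer>`, `<page_break>`). As a hedged sketch, such a string can be round-tripped through docling-core roughly like this (the `DocTagsDocument`/`load_from_doctags` names reflect the author's understanding of the 2.19-era API and should be double-checked against the installed version):

```python
from PIL import Image
from docling_core.types.doc.document import DocTagsDocument, DoclingDocument

# A tiny, hand-written DocTags snippet in the new format.
doctags = "<doctag><text><loc_41><loc_47><loc_234><loc_61>Hello world</text></doctag>"

# Pair the DocTags string with a (blank) page image of the reference size.
page = Image.new("RGB", (500, 500), "white")
dt_doc = DocTagsDocument.from_doctags_and_image_pairs([doctags], [page])

doc = DoclingDocument.load_from_doctags(dt_doc)
print(doc.export_to_markdown())
```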
@ -1,465 +1,282 @@
|
||||
<document>
|
||||
<section_header_level_1><location><page_1><loc_16><loc_85><loc_82><loc_86></location>TableFormer: Table Structure Understanding with Transformers.</section_header_level_1>
|
||||
<section_header_level_1><location><page_1><loc_23><loc_78><loc_74><loc_81></location>Ahmed Nassar, Nikolaos Livathinos, Maksym Lysak, Peter Staar IBM Research</section_header_level_1>
|
||||
<text><location><page_1><loc_34><loc_77><loc_62><loc_78></location>{ ahn,nli,mly,taa } @zurich.ibm.com</text>
|
||||
<section_header_level_1><location><page_1><loc_24><loc_71><loc_31><loc_73></location>Abstract</section_header_level_1>
|
||||
<section_header_level_1><location><page_1><loc_52><loc_71><loc_67><loc_72></location>a. Picture of a table:</section_header_level_1>
|
||||
<section_header_level_1><location><page_1><loc_8><loc_30><loc_21><loc_32></location>1. Introduction</section_header_level_1>
|
||||
<text><location><page_1><loc_8><loc_10><loc_47><loc_29></location>The occurrence of tables in documents is ubiquitous. They often summarise quantitative or factual data, which is cumbersome to describe in verbose text but nevertheless extremely valuable. Unfortunately, this compact representation is often not easy to parse by machines. There are many implicit conventions used to obtain a compact table representation. For example, tables often have complex columnand row-headers in order to reduce duplicated cell content. Lines of different shapes and sizes are leveraged to separate content or indicate a tree structure. Additionally, tables can also have empty/missing table-entries or multi-row textual table-entries. Fig. 1 shows a table which presents all these issues.</text>
|
||||
<figure>
|
||||
<location><page_1><loc_52><loc_62><loc_88><loc_71></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_1><loc_52><loc_62><loc_88><loc_71></location>
|
||||
<caption>Tables organize valuable content in a concise and compact representation. This content is extremely valuable for systems such as search engines, Knowledge Graph's, etc, since they enhance their predictive capabilities. Unfortunately, tables come in a large variety of shapes and sizes. Furthermore, they can have complex column/row-header configurations, multiline rows, different variety of separation lines, missing entries, etc. As such, the correct identification of the table-structure from an image is a nontrivial task. In this paper, we present a new table-structure identification model. The latter improves the latest end-toend deep learning model (i.e. encoder-dual-decoder from PubTabNet) in two significant ways. First, we introduce a new object detection decoder for table-cells. In this way, we can obtain the content of the table-cells from programmatic PDF's directly from the PDF source and avoid the training of the custom OCR decoders. This architectural change leads to more accurate table-content extraction and allows us to tackle non-english tables. Second, we replace the LSTM decoders with transformer based decoders. This upgrade improves significantly the previous state-of-the-art tree-editing-distance-score (TEDS) from 91% to 98.5% on simple tables and from 88.7% to 95% on complex tables.</caption>
|
||||
<row_0><col_0><col_header>3</col_0><col_1><col_header>1</col_1></row_0>
|
||||
</table>
|
||||
<unordered_list>
|
||||
<list_item><location><page_1><loc_52><loc_58><loc_79><loc_60></location>b. Red-annotation of bounding boxes, Blue-predictions by TableFormer</list_item>
|
||||
<doctag><page_header><loc_15><loc_131><loc_30><loc_354>arXiv:2203.01017v2 [cs.CV] 11 Mar 2022</page_header>
|
||||
<section_header_level_1><loc_79><loc_68><loc_408><loc_76>TableFormer: Table Structure Understanding with Transformers.</section_header_level_1>
|
||||
<section_header_level_1><loc_116><loc_93><loc_370><loc_108>Ahmed Nassar, Nikolaos Livathinos, Maksym Lysak, Peter Staar IBM Research</section_header_level_1>
|
||||
<text><loc_170><loc_111><loc_309><loc_116>{ ahn,nli,mly,taa } @zurich.ibm.com</text>
|
||||
<section_header_level_1><loc_119><loc_136><loc_156><loc_143>Abstract</section_header_level_1>
|
||||
<section_header_level_1><loc_258><loc_138><loc_334><loc_143>a. Picture of a table:</section_header_level_1>
|
||||
<section_header_level_1><loc_41><loc_341><loc_104><loc_348>1. Introduction</section_header_level_1>
|
||||
<text><loc_41><loc_354><loc_234><loc_450>The occurrence of tables in documents is ubiquitous. They often summarise quantitative or factual data, which is cumbersome to describe in verbose text but nevertheless extremely valuable. Unfortunately, this compact representation is often not easy to parse by machines. There are many implicit conventions used to obtain a compact table representation. For example, tables often have complex columnand row-headers in order to reduce duplicated cell content. Lines of different shapes and sizes are leveraged to separate content or indicate a tree structure. Additionally, tables can also have empty/missing table-entries or multi-row textual table-entries. Fig. 1 shows a table which presents all these issues.</text>
|
||||
<picture><loc_258><loc_144><loc_439><loc_191></picture>
|
||||
<otsl><loc_258><loc_144><loc_439><loc_191><ched>3<ched>1<nl><caption><loc_41><loc_152><loc_234><loc_324>Tables organize valuable content in a concise and compact representation. This content is extremely valuable for systems such as search engines, Knowledge Graph's, etc, since they enhance their predictive capabilities. Unfortunately, tables come in a large variety of shapes and sizes. Furthermore, they can have complex column/row-header configurations, multiline rows, different variety of separation lines, missing entries, etc. As such, the correct identification of the table-structure from an image is a nontrivial task. In this paper, we present a new table-structure identification model. The latter improves the latest end-toend deep learning model (i.e. encoder-dual-decoder from PubTabNet) in two significant ways. First, we introduce a new object detection decoder for table-cells. In this way, we can obtain the content of the table-cells from programmatic PDF's directly from the PDF source and avoid the training of the custom OCR decoders. This architectural change leads to more accurate table-content extraction and allows us to tackle non-english tables. Second, we replace the LSTM decoders with transformer based decoders. This upgrade improves significantly the previous state-of-the-art tree-editing-distance-score (TEDS) from 91% to 98.5% on simple tables and from 88.7% to 95% on complex tables.</caption></otsl>
|
||||
<unordered_list><list_item><loc_258><loc_198><loc_397><loc_210>b. Red-annotation of bounding boxes, Blue-predictions by TableFormer</list_item>
|
||||
</unordered_list>
|
||||
<figure>
|
||||
<location><page_1><loc_51><loc_48><loc_88><loc_57></location>
|
||||
</figure>
|
||||
<unordered_list>
|
||||
<list_item><location><page_1><loc_52><loc_46><loc_80><loc_47></location>c. Structure predicted by TableFormer:</list_item>
|
||||
<picture><loc_257><loc_213><loc_441><loc_259></picture>
|
||||
<unordered_list><list_item><loc_258><loc_265><loc_401><loc_271>c. Structure predicted by TableFormer:</list_item>
|
||||
</unordered_list>
|
||||
<figure>
|
||||
<location><page_1><loc_52><loc_37><loc_88><loc_45></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_1><loc_52><loc_37><loc_88><loc_45></location>
|
||||
<caption>Figure 1: Picture of a table with subtle, complex features such as (1) multi-column headers, (2) cell with multi-row text and (3) cells with no content. Image from PubTabNet evaluation set, filename: 'PMC2944238 004 02'.</caption>
|
||||
<row_0><col_0><col_header>0</col_0><col_1><col_header>1</col_1><col_2><col_header>1</col_2><col_3><col_header>2 1</col_3><col_4><col_header>2 1</col_4><col_5><body></col_5></row_0>
|
||||
<row_1><col_0><body>3</col_0><col_1><body>4</col_1><col_2><body>5 3</col_2><col_3><body>6</col_3><col_4><body>7</col_4><col_5><body></col_5></row_1>
|
||||
<row_2><col_0><body>8</col_0><col_1><body>9</col_1><col_2><body>10</col_2><col_3><body>11</col_3><col_4><body>12</col_4><col_5><body>2</col_5></row_2>
|
||||
<row_3><col_0><body></col_0><col_1><body>13</col_1><col_2><body>14</col_2><col_3><body>15</col_3><col_4><body>16</col_4><col_5><body>2</col_5></row_3>
|
||||
<row_4><col_0><body></col_0><col_1><body>17</col_1><col_2><body>18</col_2><col_3><body>19</col_3><col_4><body>20</col_4><col_5><body>2</col_5></row_4>
|
||||
</table>
|
||||
<text><location><page_1><loc_50><loc_16><loc_89><loc_26></location>Recently, significant progress has been made with vision based approaches to extract tables in documents. For the sake of completeness, the issue of table extraction from documents is typically decomposed into two separate challenges, i.e. (1) finding the location of the table(s) on a document-page and (2) finding the structure of a given table in the document.</text>
|
||||
<text><location><page_1><loc_50><loc_10><loc_89><loc_16></location>The first problem is called table-location and has been previously addressed [30, 38, 19, 21, 23, 26, 8] with stateof-the-art object-detection networks (e.g. YOLO and later on Mask-RCNN [9]). For all practical purposes, it can be</text>
|
||||
<text><location><page_2><loc_8><loc_88><loc_47><loc_91></location>considered as a solved problem, given enough ground-truth data to train on.</text>
|
||||
<text><location><page_2><loc_8><loc_71><loc_47><loc_87></location>The second problem is called table-structure decomposition. The latter is a long standing problem in the community of document understanding [6, 4, 14]. Contrary to the table-location problem, there are no commonly used approaches that can easily be re-purposed to solve this problem. Lately, a set of new model-architectures has been proposed by the community to address table-structure decomposition [37, 36, 18, 20]. All these models have some weaknesses (see Sec. 2). The common denominator here is the reliance on textual features and/or the inability to provide the bounding box of each table-cell in the original image.</text>
|
||||
<text><location><page_2><loc_8><loc_53><loc_47><loc_71></location>In this paper, we want to address these weaknesses and present a robust table-structure decomposition algorithm. The design criteria for our model are the following. First, we want our algorithm to be language agnostic. In this way, we can obtain the structure of any table, irregardless of the language. Second, we want our algorithm to leverage as much data as possible from the original PDF document. For programmatic PDF documents, the text-cells can often be extracted much faster and with higher accuracy compared to OCR methods. Last but not least, we want to have a direct link between the table-cell and its bounding box in the image.</text>
|
||||
<text><location><page_2><loc_8><loc_45><loc_47><loc_53></location>To meet the design criteria listed above, we developed a new model called TableFormer and a synthetically generated table structure dataset called SynthTabNet $^{1}$. In particular, our contributions in this work can be summarised as follows:</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_2><loc_10><loc_38><loc_47><loc_44></location>· We propose TableFormer , a transformer based model that predicts tables structure and bounding boxes for the table content simultaneously in an end-to-end approach.</list_item>
|
||||
<list_item><location><page_2><loc_10><loc_31><loc_47><loc_37></location>· Across all benchmark datasets TableFormer significantly outperforms existing state-of-the-art metrics, while being much more efficient in training and inference to existing works.</list_item>
|
||||
<list_item><location><page_2><loc_10><loc_25><loc_47><loc_29></location>· We present SynthTabNet a synthetically generated dataset, with various appearance styles and complexity.</list_item>
|
||||
<list_item><location><page_2><loc_10><loc_19><loc_47><loc_24></location>· An augmented dataset based on PubTabNet [37], FinTabNet [36], and TableBank [17] with generated ground-truth for reproducibility.</list_item>
|
||||
<picture><loc_258><loc_274><loc_439><loc_313></picture>
|
||||
<otsl><loc_258><loc_274><loc_439><loc_313><ched>0<ched>1<lcel><ched>2 1<lcel><ecel><nl><fcel>3<fcel>4<fcel>5 3<fcel>6<fcel>7<ecel><nl><fcel>8<fcel>9<fcel>10<fcel>11<fcel>12<fcel>2<nl><ecel><fcel>13<fcel>14<fcel>15<fcel>16<ucel><nl><ecel><fcel>17<fcel>18<fcel>19<fcel>20<ucel><nl><caption><loc_252><loc_325><loc_445><loc_353>Figure 1: Picture of a table with subtle, complex features such as (1) multi-column headers, (2) cell with multi-row text and (3) cells with no content. Image from PubTabNet evaluation set, filename: 'PMC2944238 004 02'.</caption></otsl>
|
||||
<text><loc_252><loc_369><loc_445><loc_420>Recently, significant progress has been made with vision based approaches to extract tables in documents. For the sake of completeness, the issue of table extraction from documents is typically decomposed into two separate challenges, i.e. (1) finding the location of the table(s) on a document-page and (2) finding the structure of a given table in the document.</text>
|
||||
<text><loc_252><loc_422><loc_445><loc_450>The first problem is called table-location and has been previously addressed [30, 38, 19, 21, 23, 26, 8] with stateof-the-art object-detection networks (e.g. YOLO and later on Mask-RCNN [9]). For all practical purposes, it can be</text>
|
||||
<page_footer><loc_241><loc_463><loc_245><loc_469>1</page_footer>
|
||||
<page_break>
|
||||
<text><loc_41><loc_47><loc_234><loc_61>considered as a solved problem, given enough ground-truth data to train on.</text>
|
||||
<text><loc_41><loc_63><loc_234><loc_144>The second problem is called table-structure decomposition. The latter is a long standing problem in the community of document understanding [6, 4, 14]. Contrary to the table-location problem, there are no commonly used approaches that can easily be re-purposed to solve this problem. Lately, a set of new model-architectures has been proposed by the community to address table-structure decomposition [37, 36, 18, 20]. All these models have some weaknesses (see Sec. 2). The common denominator here is the reliance on textual features and/or the inability to provide the bounding box of each table-cell in the original image.</text>
|
||||
<text><loc_41><loc_146><loc_234><loc_235>In this paper, we want to address these weaknesses and present a robust table-structure decomposition algorithm. The design criteria for our model are the following. First, we want our algorithm to be language agnostic. In this way, we can obtain the structure of any table, irregardless of the language. Second, we want our algorithm to leverage as much data as possible from the original PDF document. For programmatic PDF documents, the text-cells can often be extracted much faster and with higher accuracy compared to OCR methods. Last but not least, we want to have a direct link between the table-cell and its bounding box in the image.</text>
|
||||
<text><loc_41><loc_237><loc_234><loc_273>To meet the design criteria listed above, we developed a new model called TableFormer and a synthetically generated table structure dataset called SynthTabNet $^{1}$. In particular, our contributions in this work can be summarised as follows:</text>
|
||||
<unordered_list><list_item><loc_50><loc_281><loc_234><loc_309>· We propose TableFormer , a transformer based model that predicts tables structure and bounding boxes for the table content simultaneously in an end-to-end approach.</list_item>
|
||||
<list_item><loc_50><loc_317><loc_234><loc_345>· Across all benchmark datasets TableFormer significantly outperforms existing state-of-the-art metrics, while being much more efficient in training and inference to existing works.</list_item>
|
||||
<list_item><loc_50><loc_353><loc_234><loc_374>· We present SynthTabNet a synthetically generated dataset, with various appearance styles and complexity.</list_item>
|
||||
<list_item><loc_50><loc_382><loc_234><loc_403>· An augmented dataset based on PubTabNet [37], FinTabNet [36], and TableBank [17] with generated ground-truth for reproducibility.</list_item>
|
||||
</unordered_list>
|
||||
<text><location><page_2><loc_8><loc_12><loc_47><loc_18></location>The paper is structured as follows. In Sec. 2, we give a brief overview of the current state-of-the-art. In Sec. 3, we describe the datasets on which we train. In Sec. 4, we introduce the TableFormer model-architecture and describe</text>
|
||||
<text><location><page_2><loc_50><loc_86><loc_89><loc_91></location>its results & performance in Sec. 5. As a conclusion, we describe how this new model-architecture can be re-purposed for other tasks in the computer-vision community.</text>
|
||||
<section_header_level_1><location><page_2><loc_50><loc_83><loc_81><loc_85></location>2. Previous work and State of the Art</section_header_level_1>
|
||||
<text><location><page_2><loc_50><loc_58><loc_89><loc_82></location>Identifying the structure of a table has been an outstanding problem in the document-parsing community, that motivates many organised public challenges [6, 4, 14]. The difficulty of the problem can be attributed to a number of factors. First, there is a large variety in the shapes and sizes of tables. Such large variety requires a flexible method. This is especially true for complex column- and row headers, which can be extremely intricate and demanding. A second factor of complexity is the lack of data with regard to table-structure. Until the publication of PubTabNet [37], there were no large datasets (i.e. > 100 K tables) that provided structure information. This happens primarily due to the fact that tables are notoriously time-consuming to annotate by hand. However, this has definitely changed in recent years with the deliverance of PubTabNet [37], FinTabNet [36], TableBank [17] etc.</text>
<text><location><page_2><loc_50><loc_43><loc_89><loc_58></location>Before the rising popularity of deep neural networks, the community relied heavily on heuristic and/or statistical methods to do table structure identification [3, 7, 11, 5, 13, 28]. Although such methods work well on constrained tables [12], a more data-driven approach can be applied due to the advent of convolutional neural networks (CNNs) and the availability of large datasets. To the best of our knowledge, there are currently two different types of network architecture that are being pursued for state-of-the-art table-structure identification.</text>
<text><location><page_2><loc_50><loc_10><loc_89><loc_43></location>Image-to-Text networks : In this type of network, one predicts a sequence of tokens starting from an encoded image. Such sequences of tokens can be HTML table tags [37, 17] or LaTeX symbols[10]. The choice of symbols is ultimately not very important, since one can be transformed into the other. There are however subtle variations in the Image-to-Text networks. The easiest network architectures are "image-encoder → text-decoder" (IETD), similar to network architectures that try to provide captions to images [32]. In these IETD networks, one expects as output the LaTeX/HTML string of the entire table, i.e. the symbols necessary for creating the table with the content of the table. Another approach is the "image-encoder → dual decoder" (IEDD) networks. In these type of networks, one has two consecutive decoders with different purposes. The first decoder is the tag-decoder , i.e. it only produces the HTML/LaTeX tags which construct an empty table. The second content-decoder uses the encoding of the image in combination with the output encoding of each cell-tag (from the tag-decoder ) to generate the textual content of each table cell. The network architecture of IEDD is certainly more elaborate, but it has the advantage that one can pre-train the</text>
<text><location><page_3><loc_8><loc_89><loc_41><loc_91></location>tag-decoder which is constrained to the table-tags.</text>
<text><location><page_3><loc_8><loc_65><loc_47><loc_89></location>In practice, both network architectures (IETD and IEDD) require an implicit, custom-trained optical-character-recognition (OCR) engine to obtain the content of the table-cells. In the case of IETD, this OCR engine is implicit in the decoder, similar to [24]. For the IEDD, the OCR is solely embedded in the content-decoder. This reliance on a custom, implicit OCR decoder is of course problematic. OCR is a well-known and extremely tough problem that often needs custom training for each individual language. However, the limited availability of non-English content in the current datasets makes it impractical to apply the IETD and IEDD methods to tables in other languages. Additionally, OCR can be completely omitted if the tables originate from programmatic PDF documents with known positions of each cell. The latter was the inspiration for the work of this paper.</text>
<text><location><page_3><loc_8><loc_38><loc_47><loc_65></location>Graph Neural networks : Graph Neural networks (GNNs) take a radically different approach to table-structure extraction. Note that one table cell can consist of multiple text-cells. To obtain the table-structure, one creates an initial graph, where each of the text-cells becomes a node in the graph, similar to [33, 34, 2]. Each node is then associated with an embedding vector coming from the encoded image, its coordinates and the encoded text. Furthermore, nodes that represent adjacent text-cells are linked. Graph Convolutional Network (GCN) based methods take the image as an input, but also the position of the text-cells and their content [18]. The purpose of a GCN is to transform the input graph into a new graph, which replaces the old links with new ones. The new links then represent the table-structure. With this approach, one can avoid the need to build custom OCR decoders. However, the quality of the reconstructed structure is not comparable to the current state-of-the-art [18].</text>
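To make the graph-construction step concrete, here is a minimal, hypothetical sketch (not taken from [33, 34, 2] or [18]): each text-cell becomes a node, and a toy adjacency rule links horizontally neighbouring cells. The `TextCell` class, the `gap` threshold and the adjacency test are illustrative assumptions only.

```python
from dataclasses import dataclass
from itertools import combinations


@dataclass
class TextCell:          # hypothetical container for one extracted text-cell
    text: str
    bbox: tuple          # (x0, y0, x1, y1) in page coordinates


def horizontally_adjacent(a: TextCell, b: TextCell, gap: float = 10.0) -> bool:
    """Toy adjacency test: vertical overlap plus a small horizontal gap."""
    overlap_y = min(a.bbox[3], b.bbox[3]) - max(a.bbox[1], b.bbox[1])
    dx = max(a.bbox[0], b.bbox[0]) - min(a.bbox[2], b.bbox[2])
    return overlap_y > 0 and 0 <= dx <= gap


cells = [
    TextCell("Revenue", (10, 10, 60, 20)),
    TextCell("2021", (65, 10, 90, 20)),
    TextCell("Total", (10, 40, 50, 50)),
]
# nodes are the text-cells; edges link adjacent ones (the initial graph a GNN/GCN would refine)
edges = [(i, j) for (i, a), (j, b) in combinations(enumerate(cells), 2)
         if horizontally_adjacent(a, b)]
print(edges)  # [(0, 1)]
```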
<text><location><page_3><loc_8><loc_21><loc_47><loc_38></location>Hybrid Deep Learning-Rule-Based approach : A popular current model for table-structure identification is the use of a hybrid Deep Learning-Rule-Based approach similar to [27, 29]. In this approach, one first detects the position of the table-cells with object detection (e.g. YoloVx or MaskRCNN), then classifies the table into different types (from its images) and finally uses different rule-sets to obtain its table-structure. Currently, this approach achieves state-of-the-art results, but is not an end-to-end deep-learning method. As such, new rules need to be written if different types of tables are encountered.</text>
<section_header_level_1><location><page_3><loc_8><loc_18><loc_17><loc_20></location>3. Datasets</section_header_level_1>
<text><location><page_3><loc_8><loc_10><loc_47><loc_17></location>We rely on large-scale datasets such as PubTabNet [37], FinTabNet [36], and TableBank [17] datasets to train and evaluate our models. These datasets span over various appearance styles and content. We also introduce our own synthetically generated SynthTabNet dataset to fix an im-</text>
<figure>
<location><page_3><loc_51><loc_68><loc_90><loc_90></location>
<caption>Figure 2: Distribution of the tables across different table dimensions in PubTabNet + FinTabNet datasets</caption>
</figure>
<text><location><page_3><loc_50><loc_59><loc_71><loc_60></location>balance in the previous datasets.</text>
<text><location><page_3><loc_50><loc_21><loc_89><loc_58></location>The PubTabNet dataset contains 509k tables delivered as annotated PNG images. The annotations consist of the table structure represented in HTML format, the tokenized text and its bounding boxes per table cell. Fig. 1 shows the appearance style of PubTabNet. Depending on its complexity, a table is characterized as "simple" when it does not contain row spans or column spans, otherwise it is "complex". The dataset is divided into Train and Val splits (roughly 98% and 2%). The Train split consists of 54% simple and 46% complex tables and the Val split of 51% and 49% respectively. The FinTabNet dataset contains 112k tables delivered as single-page PDF documents with mixed table structures and text content. Similarly to PubTabNet, the annotations of FinTabNet include the table structure in HTML, the tokenized text and the bounding boxes on a table cell basis. The dataset is divided into Train, Test and Val splits (81%, 9.5%, 9.5%), and each one is almost equally divided into simple and complex tables (Train: 48% simple, 52% complex, Test: 48% simple, 52% complex, Val: 53% simple, 47% complex). Finally, the TableBank dataset consists of 145k tables provided as JPEG images. The latter has annotations for the table structure, but only a few with bounding boxes of the table cells. The entire dataset consists of simple tables and it is divided into 90% Train, 3% Test and 7% Val splits.</text>
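The simple/complex distinction quoted above reduces to a check for span attributes in the structure tokens. A tiny illustrative sketch (the token format is an assumption, not PubTabNet's exact encoding):

```python
def is_complex(structure_tokens: list) -> bool:
    """A table is 'complex' if any cell carries a row span or column span,
    otherwise it is 'simple' (following the convention described above)."""
    return any("rowspan" in tok or "colspan" in tok for tok in structure_tokens)


simple_table = ["<tr>", "<td>", "</td>", "<td>", "</td>", "</tr>"]
complex_table = ["<tr>", "<td", ' rowspan="2"', ">", "</td>", "</tr>"]
print(is_complex(simple_table), is_complex(complex_table))  # False True
```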
<text><location><page_3><loc_50><loc_10><loc_89><loc_20></location>Due to the heterogeneity across the dataset formats, it was necessary to combine all available data into one homogenized dataset before we could train our models for practical purposes. Given the size of PubTabNet, we adopted its annotation format and we extracted and converted all tables as PNG images with a resolution of 72 dpi. Additionally, we have filtered out tables with extreme sizes due to small</text>
<text><location><page_4><loc_8><loc_88><loc_47><loc_91></location>amount of such tables, and kept only those ones ranging between 1*1 and 20*10 (rows/columns).</text>
<text><location><page_4><loc_8><loc_60><loc_47><loc_87></location>The availability of the bounding boxes for all table cells is essential to train our models. In order to distinguish between empty and non-empty bounding boxes, we have introduced a binary class in the annotation. Unfortunately, the original datasets either omit the bounding boxes for whole tables (e.g. TableBank) or they narrow their scope only to non-empty cells. Therefore, it was imperative to introduce a data pre-processing procedure that generates the missing bounding boxes out of the annotation information. This procedure first parses the provided table structure and calculates the dimensions of the most fine-grained grid that covers the table structure. Notice that each table cell may occupy multiple grid squares due to row or column spans. In case of PubTabNet we had to compute missing bounding boxes for 48% of the simple and 69% of the complex tables. Regarding FinTabNet, 68% of the simple and 98% of the complex tables require the generation of bounding boxes.</text>
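As a rough illustration of the grid computation described above, the sketch below places spanned cells onto the most fine-grained grid and reports its dimensions; the occupied grid squares are exactly the places where missing bounding boxes would have to be synthesised. The `Cell` dataclass and the function are hypothetical, not the authors' pre-processing code.

```python
from dataclasses import dataclass


@dataclass
class Cell:              # hypothetical structure-only annotation (no bbox yet)
    text: str
    rowspan: int = 1
    colspan: int = 1


def grid_dimensions(rows):
    """Compute the most fine-grained grid covering the table structure.

    Cells are placed left-to-right; grid squares already claimed by a rowspan
    from a previous row are skipped, so a cell may occupy several squares.
    """
    occupied = set()          # (row, col) grid squares already covered
    for r, row in enumerate(rows):
        c = 0
        for cell in row:
            while (r, c) in occupied:
                c += 1
            for dr in range(cell.rowspan):
                for dc in range(cell.colspan):
                    occupied.add((r + dr, c + dc))
            c += cell.colspan
    if not occupied:
        return 0, 0
    n_rows = 1 + max(r for r, _ in occupied)
    n_cols = 1 + max(c for _, c in occupied)
    return n_rows, n_cols


# A 2x3 grid where the first cell spans two rows.
table = [
    [Cell("A", rowspan=2), Cell("B"), Cell("C")],
    [Cell("D"), Cell("E")],
]
print(grid_dimensions(table))  # (2, 3)
```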
<text><location><page_4><loc_8><loc_45><loc_47><loc_60></location>As it is illustrated in Fig. 2, the table distributions from all datasets are skewed towards simpler structures with fewer number of rows/columns. Additionally, there is very limited variance in the table styles, which in case of PubTabNet and FinTabNet means one styling format for the majority of the tables. Similar limitations appear also in the type of table content, which in some cases (e.g. FinTabNet) is restricted to a certain domain. Ultimately, the lack of diversity in the training dataset damages the ability of the models to generalize well on unseen data.</text>
<text><location><page_4><loc_8><loc_21><loc_47><loc_45></location>Motivated by those observations we aimed at generating a synthetic table dataset named SynthTabNet . This approach offers control over: 1) the size of the dataset, 2) the table structure, 3) the table style and 4) the type of content. The complexity of the table structure is described by the size of the table header and the table body, as well as the percentage of the table cells covered by row spans and column spans. A set of carefully designed styling templates provides the basis to build a wide range of table appearances. Lastly, the table content is generated out of a curated collection of text corpora. By controlling the size and scope of the synthetic datasets we are able to train and evaluate our models in a variety of different conditions. For example, we can first generate a highly diverse dataset to train our models and then evaluate their performance on other synthetic datasets which are focused on a specific domain.</text>
<text><location><page_4><loc_8><loc_10><loc_47><loc_20></location>In this regard, we have prepared four synthetic datasets, each one containing 150k examples. The corpora to generate the table text consists of the most frequent terms appearing in PubTabNet and FinTabNet together with randomly generated text. The first two synthetic datasets have been fine-tuned to mimic the appearance of the original datasets but encompass more complicated table structures. The third</text>
<table>
<location><page_4><loc_51><loc_80><loc_89><loc_91></location>
<caption>Table 1: Both "Combined-Tabnet" and "CombinedTabnet" are variations of the following: (*) The CombinedTabnet dataset is the processed combination of PubTabNet and Fintabnet. (**) The combined dataset is the processed combination of PubTabNet, Fintabnet and TableBank.</caption>
<row_0><col_0><body></col_0><col_1><col_header>Tags</col_1><col_2><col_header>Bbox</col_2><col_3><col_header>Size</col_3><col_4><col_header>Format</col_4></row_0>
<row_1><col_0><row_header>PubTabNet</col_0><col_1><body>3</col_1><col_2><body>3</col_2><col_3><body>509k</col_3><col_4><body>PNG</col_4></row_1>
<row_2><col_0><row_header>FinTabNet</col_0><col_1><body>3</col_1><col_2><body>3</col_2><col_3><body>112k</col_3><col_4><body>PDF</col_4></row_2>
<row_3><col_0><row_header>TableBank</col_0><col_1><body>3</col_1><col_2><body>7</col_2><col_3><body>145k</col_3><col_4><body>JPEG</col_4></row_3>
<row_4><col_0><row_header>Combined-Tabnet(*)</col_0><col_1><body>3</col_1><col_2><body>3</col_2><col_3><body>400k</col_3><col_4><body>PNG</col_4></row_4>
<row_5><col_0><row_header>Combined(**)</col_0><col_1><body>3</col_1><col_2><body>3</col_2><col_3><body>500k</col_3><col_4><body>PNG</col_4></row_5>
<row_6><col_0><row_header>SynthTabNet</col_0><col_1><body>3</col_1><col_2><body>3</col_2><col_3><body>600k</col_3><col_4><body>PNG</col_4></row_6>
</table>
<text><location><page_4><loc_50><loc_63><loc_89><loc_68></location>one adopts a colorful appearance with high contrast and the last one contains tables with sparse content. Lastly, we have combined all synthetic datasets into one big unified synthetic dataset of 600k examples.</text>
<text><location><page_4><loc_52><loc_61><loc_89><loc_62></location>Tab. 1 summarizes the various attributes of the datasets.</text>
<section_header_level_1><location><page_4><loc_50><loc_58><loc_73><loc_59></location>4. The TableFormer model</section_header_level_1>
<text><location><page_4><loc_50><loc_44><loc_89><loc_57></location>Given the image of a table, TableFormer is able to predict: 1) a sequence of tokens that represent the structure of a table, and 2) a bounding box coupled to a subset of those tokens. The conversion of an image into a sequence of tokens is a well-known task [35, 16]. While attention is often used as an implicit method to associate each token of the sequence with a position in the original image, an explicit association between the individual table-cells and the image bounding boxes is also required.</text>
<section_header_level_1><location><page_4><loc_50><loc_41><loc_69><loc_42></location>4.1. Model architecture.</section_header_level_1>
<text><location><page_4><loc_50><loc_16><loc_89><loc_40></location>We now describe in detail the proposed method, which is composed of three main components, see Fig. 4. Our CNN Backbone Network encodes the input as a feature vector of predefined length. The input feature vector of the encoded image is passed to the Structure Decoder to produce a sequence of HTML tags that represent the structure of the table. With each prediction of an HTML standard data cell (' < td > ') the hidden state of that cell is passed to the Cell BBox Decoder. As for spanning cells, such as row or column span, the tag is broken down to ' < ', 'rowspan=' or 'colspan=', with the number of spanning cells (attribute), and ' > '. The hidden state attached to ' < ' is passed to the Cell BBox Decoder. A shared feed forward network (FFN) receives the hidden states from the Structure Decoder, to provide the final detection predictions of the bounding box coordinates and their classification.</text>
<text><location><page_4><loc_50><loc_10><loc_89><loc_16></location>CNN Backbone Network. A ResNet-18 CNN is the backbone that receives the table image and encodes it as a vector of predefined length. The network has been modified by removing the linear and pooling layer, as we are not per-</text>
<figure>
<location><page_5><loc_12><loc_77><loc_85><loc_90></location>
<caption>Figure 3: TableFormer takes in an image of the PDF and creates bounding box and HTML structure predictions that are synchronized. The bounding boxes grab the content from the PDF and insert it into the structure.</caption>
</figure>
<figure>
<location><page_5><loc_9><loc_36><loc_47><loc_67></location>
<caption>Figure 4: Given an input image of a table, the Encoder produces fixed-length features that represent the input image. The features are then passed to both the Structure Decoder and Cell BBox Decoder . During training, the Structure Decoder receives 'tokenized tags' of the HTML code that represent the table structure. Afterwards, a transformer encoder and decoder architecture is employed to produce features that are received by a linear layer, and the Cell BBox Decoder. The linear layer is applied to the features to predict the tags. Simultaneously, the Cell BBox Decoder selects features referring to the data cells (' < td > ', ' < ') and passes them through an attention network, an MLP, and a linear layer to predict the bounding boxes.</caption>
</figure>
<text><location><page_5><loc_50><loc_63><loc_89><loc_68></location>forming classification, and adding an adaptive pooling layer of size 28*28. ResNet by default downsamples the image resolution by 32 and then the encoded image is provided to both the Structure Decoder , and Cell BBox Decoder .</text>
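A minimal torchvision sketch of the backbone modification described above: the final linear and pooling layers of ResNet-18 are dropped and an adaptive pooling layer yields a fixed 28*28 feature map. This is a hedged illustration; layer choices beyond what the text states (e.g. average pooling) are assumptions, not the reference implementation.

```python
import torch
import torch.nn as nn
from torchvision.models import resnet18


class TableImageEncoder(nn.Module):
    """Sketch: ResNet-18 without its linear and global-pooling layers,
    followed by adaptive pooling to a fixed 28x28 feature map."""

    def __init__(self):
        super().__init__()
        base = resnet18()                                            # no pretrained weights
        self.features = nn.Sequential(*list(base.children())[:-2])   # drop avgpool and fc
        self.pool = nn.AdaptiveAvgPool2d((28, 28))                   # pooling type is an assumption

    def forward(self, x):
        return self.pool(self.features(x))


encoder = TableImageEncoder()
out = encoder(torch.randn(1, 3, 448, 448))   # input size from Sec. 5.1
print(out.shape)                             # torch.Size([1, 512, 28, 28])
```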
<text><location><page_5><loc_50><loc_48><loc_89><loc_62></location>Structure Decoder. The transformer architecture of this component is based on the work proposed in [31]. After extensive experimentation, the Structure Decoder is modeled as a transformer encoder with two encoder layers and a transformer decoder made from a stack of 4 decoder layers that comprise mainly of multi-head attention and feed forward layers. This configuration uses fewer layers and heads in comparison to networks applied to other problems (e.g. "Scene Understanding", "Image Captioning"), something which we relate to the simplicity of table images.</text>
<text><location><page_5><loc_50><loc_31><loc_89><loc_47></location>The transformer encoder receives an encoded image from the CNN Backbone Network and refines it through a multi-head dot-product attention layer, followed by a Feed Forward Network. During training, the transformer decoder receives as input the output feature produced by the transformer encoder, and the tokenized input of the HTML ground-truth tags. Using a stack of multi-head attention layers, different aspects of the tag sequence could be inferred. This is achieved by each attention head on a layer operating in a different subspace, and then combining altogether their attention score.</text>
<text><location><page_5><loc_50><loc_18><loc_89><loc_31></location>Cell BBox Decoder. Our architecture allows us to simultaneously predict HTML tags and bounding boxes for each table cell, end to end, without the need for a separate object detector. This approach is inspired by DETR [1], which employs a Transformer Encoder and Decoder that looks for a specific number of object queries (potential object detections). As our model utilizes a transformer architecture, the hidden states of the ' < td > ' and ' < ' HTML structure tags become the object queries.</text>
<text><location><page_5><loc_50><loc_10><loc_89><loc_17></location>The encoding generated by the CNN Backbone Network along with the features acquired for every data cell from the Transformer Decoder are then passed to the attention network. The attention network takes both inputs and learns to provide an attention weighted encoding. This weighted at-</text>
<text><location><page_6><loc_8><loc_80><loc_47><loc_91></location>tention encoding is then multiplied to the encoded image to produce a feature for each table cell. Notice that this is different than the typical object detection problem where imbalances between the number of detections and the amount of objects may exist. In our case, we know up front that the produced detections always match with the table cells in number and correspondence.</text>
<text><location><page_6><loc_8><loc_70><loc_47><loc_80></location>The output features for each table cell are then fed into the feed-forward network (FFN). The FFN consists of a Multi-Layer Perceptron (3 layers with ReLU activation function) that predicts the normalized coordinates for the bounding box of each table cell. Finally, the predicted bounding boxes are classified based on whether they are empty or not using a linear layer.</text>
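The two prediction heads described in the last paragraphs can be sketched as follows: a 3-layer MLP with ReLU activations regressing normalized box coordinates, and a linear layer classifying cells as empty or non-empty. Hidden width, the sigmoid on the box output and the exact coordinate format are assumptions.

```python
import torch
import torch.nn as nn


class CellBBoxHead(nn.Module):
    """Illustrative sketch of the FFN described above, not the reference code."""

    def __init__(self, d_model: int = 512, hidden: int = 256):
        super().__init__()
        self.bbox_mlp = nn.Sequential(           # 3-layer MLP with ReLU
            nn.Linear(d_model, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 4), nn.Sigmoid(),  # normalized box coordinates (format assumed)
        )
        self.cls = nn.Linear(d_model, 2)         # empty vs. non-empty cell

    def forward(self, cell_features):
        return self.bbox_mlp(cell_features), self.cls(cell_features)


head = CellBBoxHead()
boxes, labels = head(torch.randn(7, 512))        # features for 7 table cells
print(boxes.shape, labels.shape)                 # torch.Size([7, 4]) torch.Size([7, 2])
```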
<text><location><page_6><loc_8><loc_44><loc_47><loc_69></location>Loss Functions. We formulate a multi-task loss Eq. 2 to train our network. The Cross-Entropy loss (denoted as l$_{s}$ ) is used to train the Structure Decoder which predicts the structure tokens. As for the Cell BBox Decoder , it is trained with a combination of losses denoted as l$_{box}$ . l$_{box}$ consists of the generally used l$_{1}$ loss for object detection and the IoU loss ( l$_{iou}$ ) to be scale invariant as explained in [25]. In comparison to DETR, we do not use the Hungarian algorithm [15] to match the predicted bounding boxes with the ground-truth boxes, as we have already achieved a one-to-one match through two steps: 1) Our token input sequence is naturally ordered, therefore the hidden states of the table data cells are also in order when they are provided as input to the Cell BBox Decoder , and 2) Our bounding box generation mechanism (see Sec. 3) ensures a one-to-one mapping between the cell content and its bounding box for all post-processed datasets.</text>
<text><location><page_6><loc_8><loc_41><loc_47><loc_43></location>The loss used to train the TableFormer can be defined as follows:</text>
<formula><location><page_6><loc_20><loc_35><loc_47><loc_38></location></formula>
<text><location><page_6><loc_8><loc_32><loc_46><loc_33></location>where λ ∈ [0, 1], and λ$_{iou}$, λ$_{l1}$ ∈ ℝ are hyper-parameters.</text>
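Since Eq. 2 itself is not reproduced in this excerpt, the following PyTorch sketch only shows one plausible way of combining the terms named above: cross-entropy l_s for the structure tokens and a box term mixing l_1 with a plain IoU loss, weighted by λ, λ_iou and λ_l1. The exact weighting and the scale-invariant IoU variant of [25] are not claimed here; everything below is an illustrative assumption.

```python
import torch
import torch.nn.functional as F


def iou_loss(pred, target, eps=1e-6):
    """1 - IoU for axis-aligned boxes given as normalized (x1, y1, x2, y2)."""
    x1 = torch.max(pred[:, 0], target[:, 0])
    y1 = torch.max(pred[:, 1], target[:, 1])
    x2 = torch.min(pred[:, 2], target[:, 2])
    y2 = torch.min(pred[:, 3], target[:, 3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    return (1.0 - inter / (area_p + area_t - inter + eps)).mean()


def multitask_loss(tag_logits, tag_targets, box_pred, box_target,
                   lam=0.5, lam_iou=1.0, lam_l1=1.0):
    """Hypothetical combination: l = lam * l_s + (1 - lam) * l_box,
    with l_box = lam_iou * l_iou + lam_l1 * l_1."""
    l_s = F.cross_entropy(tag_logits, tag_targets)   # structure tokens
    l_1 = F.l1_loss(box_pred, box_target)            # box regression
    l_box = lam_iou * iou_loss(box_pred, box_target) + lam_l1 * l_1
    return lam * l_s + (1.0 - lam) * l_box


# Toy shapes: 10 predicted tags over a 30-token vocabulary, 4 cell boxes.
logits, targets = torch.randn(10, 30), torch.randint(0, 30, (10,))
xy = torch.rand(4, 2)
boxes = torch.cat([xy, xy + 0.1], dim=1)             # well-formed x1y1x2y2 boxes
print(multitask_loss(logits, targets, boxes, boxes.clone()))
```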
<section_header_level_1><location><page_6><loc_8><loc_28><loc_28><loc_30></location>5. Experimental Results</section_header_level_1>
<section_header_level_1><location><page_6><loc_8><loc_26><loc_29><loc_27></location>5.1. Implementation Details</section_header_level_1>
<text><location><page_6><loc_8><loc_19><loc_47><loc_25></location>TableFormer uses ResNet-18 as the CNN Backbone Network . The input images are resized to 448*448 pixels and the feature map has a dimension of 28*28. Additionally, we enforce the following input constraints:</text>
<formula><location><page_6><loc_15><loc_14><loc_47><loc_17></location></formula>
<text><location><page_6><loc_8><loc_10><loc_47><loc_13></location>Although input constraints are used also by other methods, such as EDD, ours are less restrictive due to the improved</text>
<text><location><page_6><loc_50><loc_86><loc_89><loc_91></location>runtime performance and lower memory footprint of TableFormer. This allows us to utilize input samples with longer sequences and images with larger dimensions.</text>
<text><location><page_6><loc_50><loc_59><loc_89><loc_85></location>The Transformer Encoder consists of two "Transformer Encoder Layers", with an input feature size of 512, feed forward network of 1024, and 4 attention heads. As for the Transformer Decoder it is composed of four "Transformer Decoder Layers" with similar input and output dimensions as the "Transformer Encoder Layers". Even though our model uses fewer layers and heads than the default implementation parameters, our extensive experimentation has proved this setup to be more suitable for table images. We attribute this finding to the inherent design of table images, which contain mostly lines and text, unlike the more elaborate content present in other scopes (e.g. the COCO dataset). Moreover, we have added ResNet blocks to the inputs of the Structure Decoder and Cell BBox Decoder. This prevents a decoder having a stronger influence over the learned weights which would damage the other prediction task (structure vs bounding boxes), but learn task specific weights instead. Lastly our dropout layers are set to 0.5.</text>
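The stated dimensions (two encoder layers, four decoder layers, input feature size 512, feed-forward size 1024, 4 attention heads, dropout 0.5) map directly onto PyTorch's built-in transformer modules. The snippet below shows only that configuration with a toy forward pass; it is not the full Structure Decoder.

```python
import torch
import torch.nn as nn

d_model, n_heads, ffn_dim, dropout = 512, 4, 1024, 0.5

encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model, n_heads, dim_feedforward=ffn_dim, dropout=dropout),
    num_layers=2,
)
decoder = nn.TransformerDecoder(
    nn.TransformerDecoderLayer(d_model, n_heads, dim_feedforward=ffn_dim, dropout=dropout),
    num_layers=4,
)

# Toy pass: 28*28 = 784 image-feature positions, a tag sequence of length 10.
memory = encoder(torch.randn(784, 1, d_model))        # (seq, batch, d_model)
tags = decoder(torch.randn(10, 1, d_model), memory)   # decoder attends to the memory
print(tags.shape)                                     # torch.Size([10, 1, 512])
```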
<text><location><page_6><loc_50><loc_46><loc_89><loc_58></location>For training, TableFormer is trained with 3 Adam optimizers, one each for the CNN Backbone Network , Structure Decoder , and Cell BBox Decoder . Taking PubTabNet as an example for our parameter setup, the initial learning rate is 0.001 for 12 epochs with a batch size of 24, and λ set to 0.5. Afterwards, we reduce the learning rate to 0.0001, the batch size to 18 and train for 12 more epochs or until convergence.</text>
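A schematic of the two-stage schedule just described, with one Adam optimizer per component. The modules and the loss below are placeholders; only the learning rates, batch sizes, epoch counts and the one-optimizer-per-component pattern come from the text.

```python
import torch
import torch.nn as nn

# Placeholder stand-ins for the three trained components.
modules = {
    "backbone": nn.Linear(8, 8),
    "structure_decoder": nn.Linear(8, 8),
    "bbox_decoder": nn.Linear(8, 8),
}

stages = [
    {"lr": 1e-3, "batch_size": 24, "epochs": 12},   # initial stage
    {"lr": 1e-4, "batch_size": 18, "epochs": 12},   # reduced learning rate, or until convergence
]

for stage in stages:
    optimizers = [torch.optim.Adam(m.parameters(), lr=stage["lr"])
                  for m in modules.values()]        # one Adam optimizer per component
    for _epoch in range(stage["epochs"]):
        x = torch.randn(stage["batch_size"], 8)
        # placeholder loss; the real objective is the multi-task loss above
        loss = sum(m(x).pow(2).mean() for m in modules.values())
        for opt in optimizers:
            opt.zero_grad()
        loss.backward()
        for opt in optimizers:
            opt.step()
print("finished toy schedule")
```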
<text><location><page_6><loc_50><loc_30><loc_89><loc_45></location>TableFormer is implemented with the PyTorch and Torchvision libraries [22]. To speed up the inference, the image undergoes a single forward pass through the CNN Backbone Network and transformer encoder. This eliminates the overhead of generating the same features for each decoding step. Similarly, we employ a 'caching' technique to perform faster autoregressive decoding. This is achieved by storing the features of decoded tokens so we can reuse them for each time step. Therefore, we only compute the attention for each new tag.</text>
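The single forward pass of the encoder and the reuse of already-computed tag representations can be illustrated with a toy greedy-decoding loop. This sketch only caches the encoder output and the tag embeddings, whereas the technique described above also stores decoder features per time step; the vocabulary and shapes are invented for illustration.

```python
import torch
import torch.nn as nn

vocab = ["<start>", "<td>", "</td>", "<tr>", "</tr>", "<end>"]
d_model = 512
embed = nn.Embedding(len(vocab), d_model)
decoder = nn.TransformerDecoder(
    nn.TransformerDecoderLayer(d_model, nhead=4, dim_feedforward=1024), num_layers=4)
to_logits = nn.Linear(d_model, len(vocab))

# 1) The table image is encoded once, outside the decoding loop.
memory = torch.randn(784, 1, d_model)   # stand-in for CNN backbone + transformer encoder output

# 2) Greedy decoding; embeddings of already-decoded tags are kept and reused.
cached = [embed(torch.tensor([vocab.index("<start>")]))]   # list of (1, d_model) tensors
tags = []
with torch.no_grad():
    for _ in range(8):
        tgt = torch.stack(cached)                          # (seq_so_far, 1, d_model)
        out = decoder(tgt, memory)
        next_id = int(to_logits(out[-1, 0]).argmax())
        tags.append(vocab[next_id])
        if vocab[next_id] == "<end>":
            break
        cached.append(embed(torch.tensor([next_id])))      # only the new tag is embedded
print(tags)
```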
<section_header_level_1><location><page_6><loc_50><loc_26><loc_65><loc_27></location>5.2. Generalization</section_header_level_1>
<text><location><page_6><loc_50><loc_15><loc_89><loc_24></location>TableFormer is evaluated on three major publicly available datasets of different nature to prove the generalization and effectiveness of our model. The datasets used for evaluation are the PubTabNet, FinTabNet and TableBank which stem from the scientific, financial and general domains respectively.</text>
<text><location><page_6><loc_50><loc_10><loc_89><loc_14></location>We also share our baseline results on the challenging SynthTabNet dataset. Throughout our experiments, the same parameters stated in Sec. 5.1 are utilized.</text>
<section_header_level_1><location><page_7><loc_8><loc_89><loc_27><loc_91></location>5.3. Datasets and Metrics</section_header_level_1>
<text><location><page_7><loc_8><loc_83><loc_47><loc_88></location>The Tree-Edit-Distance-Based Similarity (TEDS) metric was introduced in [37]. It represents the prediction, and ground-truth as a tree structure of HTML tags. This similarity is calculated as:</text>
<formula><location><page_7><loc_14><loc_78><loc_47><loc_81></location></formula>
<text><location><page_7><loc_8><loc_73><loc_47><loc_77></location>where T$_{a}$ and T$_{b}$ represent tables in tree structure HTML format. EditDist denotes the tree-edit distance, and | T | represents the number of nodes in T .</text>
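For illustration, the sketch below evaluates the TEDS definition given above, TEDS(T$_{a}$, T$_{b}$) = 1 - EditDist(T$_{a}$, T$_{b}$) / max(|T$_{a}$|, |T$_{b}$|), with a deliberately simplified ordered-tree edit distance (relabel cost 1, children aligned by a sequence DP). The metric in [37] uses the exact tree-edit-distance algorithm, so treat this purely as a toy reference.

```python
class Node:
    def __init__(self, tag, *children):
        self.tag = tag
        self.children = children


def size(t):
    """Number of nodes |T| in the tree."""
    return 1 + sum(size(c) for c in t.children)


def edit_distance(a, b):
    """Simplified ordered-tree edit distance (illustration only)."""
    if a is None:
        return size(b) if b is not None else 0
    if b is None:
        return size(a)
    relabel = 0 if a.tag == b.tag else 1
    ca, cb = a.children, b.children
    dp = [[0] * (len(cb) + 1) for _ in range(len(ca) + 1)]
    for i in range(1, len(ca) + 1):
        dp[i][0] = dp[i - 1][0] + size(ca[i - 1])
    for j in range(1, len(cb) + 1):
        dp[0][j] = dp[0][j - 1] + size(cb[j - 1])
    for i in range(1, len(ca) + 1):
        for j in range(1, len(cb) + 1):
            dp[i][j] = min(
                dp[i - 1][j] + size(ca[i - 1]),                          # delete subtree
                dp[i][j - 1] + size(cb[j - 1]),                          # insert subtree
                dp[i - 1][j - 1] + edit_distance(ca[i - 1], cb[j - 1]),  # match/relabel
            )
    return relabel + dp[len(ca)][len(cb)]


def teds(t_a, t_b):
    """TEDS(Ta, Tb) = 1 - EditDist(Ta, Tb) / max(|Ta|, |Tb|)."""
    return 1.0 - edit_distance(t_a, t_b) / max(size(t_a), size(t_b))


ground_truth = Node("table", Node("tr", Node("td"), Node("td")))
prediction = Node("table", Node("tr", Node("td")))
print(teds(ground_truth, prediction))   # one missing <td> out of 4 nodes -> 0.75
```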
<section_header_level_1><location><page_7><loc_8><loc_70><loc_28><loc_72></location>5.4. Quantitative Analysis</section_header_level_1>
<text><location><page_7><loc_8><loc_50><loc_47><loc_69></location>Structure. As shown in Tab. 2, TableFormer outperforms all SOTA methods across different datasets by a large margin for predicting the table structure from an image. All the more, our model outperforms pre-trained methods. During the evaluation we do not apply any table filtering. We also provide our baseline results on the SynthTabNet dataset. It has been observed that large tables (e.g. tables that occupy half of the page or more) yield poor predictions. We attribute this issue to the image resizing during the preprocessing step, that produces downsampled images with indistinguishable features. This problem can be addressed by treating such big tables with a separate model which accepts a large input image size.</text>
<table>
<location><page_7><loc_9><loc_26><loc_46><loc_48></location>
<caption>Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN).</caption>
<row_0><col_0><col_header>Model</col_0><col_1><col_header>Dataset</col_1><col_2><col_header>Simple</col_2><col_3><col_header>TEDS Complex</col_3><col_4><col_header>All</col_4></row_0>
<row_1><col_0><row_header>EDD</col_0><col_1><body>PTN</col_1><col_2><body>91.1</col_2><col_3><body>88.7</col_3><col_4><body>89.9</col_4></row_1>
<row_2><col_0><row_header>GTE</col_0><col_1><body>PTN</col_1><col_2><body>-</col_2><col_3><body>-</col_3><col_4><body>93.01</col_4></row_2>
<row_3><col_0><row_header>TableFormer</col_0><col_1><body>PTN</col_1><col_2><body>98.5</col_2><col_3><body>95.0</col_3><col_4><body>96.75</col_4></row_3>
<row_4><col_0><row_header>EDD</col_0><col_1><body>FTN</col_1><col_2><body>88.4</col_2><col_3><body>92.08</col_3><col_4><body>90.6</col_4></row_4>
<row_5><col_0><row_header>GTE</col_0><col_1><body>FTN</col_1><col_2><body>-</col_2><col_3><body>-</col_3><col_4><body>87.14</col_4></row_5>
<row_6><col_0><row_header>GTE (FT)</col_0><col_1><body>FTN</col_1><col_2><body>-</col_2><col_3><body>-</col_3><col_4><body>91.02</col_4></row_6>
<row_7><col_0><row_header>TableFormer</col_0><col_1><body>FTN</col_1><col_2><body>97.5</col_2><col_3><body>96.0</col_3><col_4><body>96.8</col_4></row_7>
<row_8><col_0><row_header>EDD</col_0><col_1><body>TB</col_1><col_2><body>86.0</col_2><col_3><body>-</col_3><col_4><body>86.0</col_4></row_8>
<row_9><col_0><row_header>TableFormer</col_0><col_1><body>TB</col_1><col_2><body>89.6</col_2><col_3><body>-</col_3><col_4><body>89.6</col_4></row_9>
<row_10><col_0><row_header>TableFormer</col_0><col_1><body>STN</col_1><col_2><body>96.9</col_2><col_3><body>95.7</col_3><col_4><body>96.7</col_4></row_10>
</table>
<text><location><page_7><loc_8><loc_21><loc_43><loc_22></location>FT: Model was trained on PubTabNet then finetuned.</text>
<text><location><page_7><loc_8><loc_10><loc_47><loc_19></location>Cell Detection. Like any object detector, our Cell BBox Detector provides bounding boxes that can be improved with post-processing during inference. We make use of the grid-like structure of tables to refine the predictions. A detailed explanation on the post-processing is available in the supplementary material. As shown in Tab. 3, we evaluate</text>
<text><location><page_7><loc_50><loc_71><loc_89><loc_91></location>our Cell BBox Decoder accuracy for cells with a class label of 'content' only, using the PASCAL VOC mAP metric for pre-processing and post-processing. Note that we do not have post-processing results for SynthTabNet, as only images are provided. To compare the performance of our proposed approach, we've integrated TableFormer's Cell BBox Decoder into the EDD architecture. As mentioned previously, the Structure Decoder provides the Cell BBox Decoder with the features needed to predict the bounding boxes. Therefore, the accuracy of the Structure Decoder directly influences the accuracy of the Cell BBox Decoder . If the Structure Decoder predicts an extra column, this will result in an extra column of predicted bounding boxes.</text>
<table>
<location><page_7><loc_50><loc_62><loc_87><loc_69></location>
<caption>Table 3: Cell Bounding Box detection results on PubTabNet, and FinTabNet. PP: Post-processing.</caption>
<row_0><col_0><col_header>Model</col_0><col_1><col_header>Dataset</col_1><col_2><col_header>mAP</col_2><col_3><col_header>mAP (PP)</col_3></row_0>
<row_1><col_0><body>EDD+BBox</col_0><col_1><body>PubTabNet</col_1><col_2><body>79.2</col_2><col_3><body>82.7</col_3></row_1>
<row_2><col_0><body>TableFormer</col_0><col_1><body>PubTabNet</col_1><col_2><body>82.1</col_2><col_3><body>86.8</col_3></row_2>
<row_3><col_0><body>TableFormer</col_0><col_1><body>SynthTabNet</col_1><col_2><body>87.7</col_2><col_3><body>-</col_3></row_3>
</table>
<text><location><page_7><loc_50><loc_34><loc_89><loc_54></location>Cell Content. In this section, we evaluate the entire pipeline of recovering a table with content. Here we put our approach to the test by capitalizing on extracting content from the PDF cells rather than decoding from images. Tab. 4 shows the TEDS score of HTML code representing the structure of the table along with the content inserted in the data cell and compared with the ground-truth. Our method achieved a 5.3% increase over the state-of-the-art and commercial solutions. We believe our scores would be higher if the HTML ground-truth matched the extracted PDF cell content. Unfortunately, there are small discrepancies such as spacing around words or special characters with various Unicode representations.</text>
<table>
<location><page_7><loc_54><loc_19><loc_85><loc_32></location>
<caption>Table 4: Results of structure with content retrieved using cell detection on PubTabNet. In all cases the input is PDF documents with cropped tables.</caption>
<row_0><col_0><body>Model</col_0><col_1><col_header>Simple</col_1><col_2><col_header>TEDS Complex</col_2><col_3><col_header>All</col_3></row_0>
<row_1><col_0><row_header>Tabula</col_0><col_1><body>78.0</col_1><col_2><body>57.8</col_2><col_3><body>67.9</col_3></row_1>
<row_2><col_0><row_header>Traprange</col_0><col_1><body>60.8</col_1><col_2><body>49.9</col_2><col_3><body>55.4</col_3></row_2>
<row_3><col_0><row_header>Camelot</col_0><col_1><body>80.0</col_1><col_2><body>66.0</col_2><col_3><body>73.0</col_3></row_3>
<row_4><col_0><row_header>Acrobat Pro</col_0><col_1><body>68.9</col_1><col_2><body>61.8</col_2><col_3><body>65.3</col_3></row_4>
<row_5><col_0><row_header>EDD</col_0><col_1><body>91.2</col_1><col_2><body>85.4</col_2><col_3><body>88.3</col_3></row_5>
<row_6><col_0><row_header>TableFormer</col_0><col_1><body>95.4</col_1><col_2><body>90.1</col_2><col_3><body>93.6</col_3></row_6>
</table>
<unordered_list>
<list_item><location><page_8><loc_9><loc_89><loc_10><loc_90></location>a.</list_item>
<list_item><location><page_8><loc_11><loc_89><loc_82><loc_90></location>Red - PDF cells, Green - predicted bounding boxes, Blue - post-processed predictions matched to PDF cells</list_item>
<text><loc_41><loc_411><loc_234><loc_439>The paper is structured as follows. In Sec. 2, we give a brief overview of the current state-of-the-art. In Sec. 3, we describe the datasets on which we train. In Sec. 4, we introduce the TableFormer model-architecture and describe</text>
<footnote><loc_50><loc_445><loc_150><loc_450>$^{1}$https://github.com/IBM/SynthTabNet</footnote>
<page_footer><loc_241><loc_463><loc_245><loc_469>2</page_footer>
<text><loc_252><loc_47><loc_445><loc_68>its results & performance in Sec. 5. As a conclusion, we describe how this new model-architecture can be re-purposed for other tasks in the computer-vision community.</text>
<section_header_level_1><loc_252><loc_77><loc_407><loc_84>2. Previous work and State of the Art</section_header_level_1>
<text><loc_252><loc_90><loc_445><loc_209>Identifying the structure of a table has been an outstanding problem in the document-parsing community, that motivates many organised public challenges [6, 4, 14]. The difficulty of the problem can be attributed to a number of factors. First, there is a large variety in the shapes and sizes of tables. Such large variety requires a flexible method. This is especially true for complex column- and row headers, which can be extremely intricate and demanding. A second factor of complexity is the lack of data with regard to table-structure. Until the publication of PubTabNet [37], there were no large datasets (i.e. > 100 K tables) that provided structure information. This happens primarily due to the fact that tables are notoriously time-consuming to annotate by hand. However, this has definitely changed in recent years with the deliverance of PubTabNet [37], FinTabNet [36], TableBank [17] etc.</text>
<text><loc_252><loc_211><loc_445><loc_284>Before the rising popularity of deep neural networks, the community relied heavily on heuristic and/or statistical methods to do table structure identification [3, 7, 11, 5, 13, 28]. Although such methods work well on constrained tables [12], a more data-driven approach can be applied due to the advent of convolutional neural networks (CNNs) and the availability of large datasets. To the best of our knowledge, there are currently two different types of network architecture that are being pursued for state-of-the-art table-structure identification.</text>
<text><loc_252><loc_286><loc_445><loc_450>Image-to-Text networks : In this type of network, one predicts a sequence of tokens starting from an encoded image. Such sequences of tokens can be HTML table tags [37, 17] or LaTeX symbols[10]. The choice of symbols is ultimately not very important, since one can be transformed into the other. There are however subtle variations in the Image-to-Text networks. The easiest network architectures are "image-encoder → text-decoder" (IETD), similar to network architectures that try to provide captions to images [32]. In these IETD networks, one expects as output the LaTeX/HTML string of the entire table, i.e. the symbols necessary for creating the table with the content of the table. Another approach is the "image-encoder → dual decoder" (IEDD) networks. In these type of networks, one has two consecutive decoders with different purposes. The first decoder is the tag-decoder , i.e. it only produces the HTML/LaTeX tags which construct an empty table. The second content-decoder uses the encoding of the image in combination with the output encoding of each cell-tag (from the tag-decoder ) to generate the textual content of each table cell. The network architecture of IEDD is certainly more elaborate, but it has the advantage that one can pre-train the</text>
<page_break>
<text><loc_41><loc_47><loc_204><loc_53>tag-decoder which is constrained to the table-tags.</text>
<text><loc_41><loc_55><loc_234><loc_174>In practice, both network architectures (IETD and IEDD) require an implicit, custom-trained optical-character-recognition (OCR) engine to obtain the content of the table-cells. In the case of IETD, this OCR engine is implicit in the decoder, similar to [24]. For the IEDD, the OCR is solely embedded in the content-decoder. This reliance on a custom, implicit OCR decoder is of course problematic. OCR is a well-known and extremely tough problem that often needs custom training for each individual language. However, the limited availability of non-English content in the current datasets makes it impractical to apply the IETD and IEDD methods to tables in other languages. Additionally, OCR can be completely omitted if the tables originate from programmatic PDF documents with known positions of each cell. The latter was the inspiration for the work of this paper.</text>
<text><loc_41><loc_176><loc_234><loc_310>Graph Neural networks : Graph Neural networks (GNNs) take a radically different approach to table-structure extraction. Note that one table cell can consist of multiple text-cells. To obtain the table-structure, one creates an initial graph, where each of the text-cells becomes a node in the graph, similar to [33, 34, 2]. Each node is then associated with an embedding vector coming from the encoded image, its coordinates and the encoded text. Furthermore, nodes that represent adjacent text-cells are linked. Graph Convolutional Network (GCN) based methods take the image as an input, but also the position of the text-cells and their content [18]. The purpose of a GCN is to transform the input graph into a new graph, which replaces the old links with new ones. The new links then represent the table-structure. With this approach, one can avoid the need to build custom OCR decoders. However, the quality of the reconstructed structure is not comparable to the current state-of-the-art [18].</text>
<text><loc_41><loc_312><loc_234><loc_393>Hybrid Deep Learning-Rule-Based approach : A popular current model for table-structure identification is the use of a hybrid Deep Learning-Rule-Based approach similar to [27, 29]. In this approach, one first detects the position of the table-cells with object detection (e.g. YoloVx or MaskRCNN), then classifies the table into different types (from its images) and finally uses different rule-sets to obtain its table-structure. Currently, this approach achieves state-of-the-art results, but is not an end-to-end deep-learning method. As such, new rules need to be written if different types of tables are encountered.</text>
<section_header_level_1><loc_41><loc_401><loc_86><loc_408>3. Datasets</section_header_level_1>
<text><loc_41><loc_414><loc_234><loc_450>We rely on large-scale datasets such as PubTabNet [37], FinTabNet [36], and TableBank [17] datasets to train and evaluate our models. These datasets span over various appearance styles and content. We also introduce our own synthetically generated SynthTabNet dataset to fix an im-</text>
<page_footer><loc_241><loc_463><loc_245><loc_469>3</page_footer>
<picture><loc_255><loc_50><loc_450><loc_158><caption><loc_252><loc_169><loc_445><loc_182>Figure 2: Distribution of the tables across different table dimensions in PubTabNet + FinTabNet datasets</caption></picture>
<text><loc_252><loc_200><loc_357><loc_206>balance in the previous datasets.</text>
<text><loc_252><loc_209><loc_445><loc_396>The PubTabNet dataset contains 509k tables delivered as annotated PNG images. The annotations consist of the table structure represented in HTML format, the tokenized text and its bounding boxes per table cell. Fig. 1 shows the appearance style of PubTabNet. Depending on its complexity, a table is characterized as "simple" when it does not contain row spans or column spans, otherwise it is "complex". The dataset is divided into Train and Val splits (roughly 98% and 2%). The Train split consists of 54% simple and 46% complex tables and the Val split of 51% and 49% respectively. The FinTabNet dataset contains 112k tables delivered as single-page PDF documents with mixed table structures and text content. Similarly to PubTabNet, the annotations of FinTabNet include the table structure in HTML, the tokenized text and the bounding boxes on a table cell basis. The dataset is divided into Train, Test and Val splits (81%, 9.5%, 9.5%), and each one is almost equally divided into simple and complex tables (Train: 48% simple, 52% complex, Test: 48% simple, 52% complex, Val: 53% simple, 47% complex). Finally, the TableBank dataset consists of 145k tables provided as JPEG images. The latter has annotations for the table structure, but only a few with bounding boxes of the table cells. The entire dataset consists of simple tables and it is divided into 90% Train, 3% Test and 7% Val splits.</text>
<text><loc_252><loc_399><loc_445><loc_450>Due to the heterogeneity across the dataset formats, it was necessary to combine all available data into one homogenized dataset before we could train our models for practical purposes. Given the size of PubTabNet, we adopted its annotation format and we extracted and converted all tables as PNG images with a resolution of 72 dpi. Additionally, we have filtered out tables with extreme sizes due to small</text>
<page_break>
<text><loc_41><loc_47><loc_234><loc_61>amount of such tables, and kept only those ones ranging between 1*1 and 20*10 (rows/columns).</text>
<text><loc_41><loc_64><loc_234><loc_198>The availability of the bounding boxes for all table cells is essential to train our models. In order to distinguish between empty and non-empty bounding boxes, we have introduced a binary class in the annotation. Unfortunately, the original datasets either omit the bounding boxes for whole tables (e.g. TableBank) or they narrow their scope only to non-empty cells. Therefore, it was imperative to introduce a data pre-processing procedure that generates the missing bounding boxes out of the annotation information. This procedure first parses the provided table structure and calculates the dimensions of the most fine-grained grid that covers the table structure. Notice that each table cell may occupy multiple grid squares due to row or column spans. In case of PubTabNet we had to compute missing bounding boxes for 48% of the simple and 69% of the complex tables. Regarding FinTabNet, 68% of the simple and 98% of the complex tables require the generation of bounding boxes.</text>
<text><loc_41><loc_201><loc_234><loc_274>As it is illustrated in Fig. 2, the table distributions from all datasets are skewed towards simpler structures with fewer number of rows/columns. Additionally, there is very limited variance in the table styles, which in case of PubTabNet and FinTabNet means one styling format for the majority of the tables. Similar limitations appear also in the type of table content, which in some cases (e.g. FinTabNet) is restricted to a certain domain. Ultimately, the lack of diversity in the training dataset damages the ability of the models to generalize well on unseen data.</text>
<text><loc_41><loc_277><loc_234><loc_396>Motivated by those observations we aimed at generating a synthetic table dataset named SynthTabNet . This approach offers control over: 1) the size of the dataset, 2) the table structure, 3) the table style and 4) the type of content. The complexity of the table structure is described by the size of the table header and the table body, as well as the percentage of the table cells covered by row spans and column spans. A set of carefully designed styling templates provides the basis to build a wide range of table appearances. Lastly, the table content is generated out of a curated collection of text corpora. By controlling the size and scope of the synthetic datasets we are able to train and evaluate our models in a variety of different conditions. For example, we can first generate a highly diverse dataset to train our models and then evaluate their performance on other synthetic datasets which are focused on a specific domain.</text>
<text><loc_41><loc_399><loc_234><loc_450>In this regard, we have prepared four synthetic datasets, each one containing 150k examples. The corpora to generate the table text consists of the most frequent terms appearing in PubTabNet and FinTabNet together with randomly generated text. The first two synthetic datasets have been fine-tuned to mimic the appearance of the original datasets but encompass more complicated table structures. The third</text>
<page_footer><loc_241><loc_463><loc_245><loc_469>4</page_footer>
<otsl><loc_254><loc_46><loc_444><loc_98><ecel><ched>Tags<ched>Bbox<ched>Size<ched>Format<nl><rhed>PubTabNet<fcel>3<fcel>3<fcel>509k<fcel>PNG<nl><rhed>FinTabNet<fcel>3<fcel>3<fcel>112k<fcel>PDF<nl><rhed>TableBank<fcel>3<fcel>7<fcel>145k<fcel>JPEG<nl><rhed>Combined-Tabnet(*)<fcel>3<fcel>3<fcel>400k<fcel>PNG<nl><rhed>Combined(**)<fcel>3<fcel>3<fcel>500k<fcel>PNG<nl><rhed>SynthTabNet<fcel>3<fcel>3<fcel>600k<fcel>PNG<nl><caption><loc_252><loc_106><loc_445><loc_142>Table 1: Both "Combined-Tabnet" and "CombinedTabnet" are variations of the following: (*) The CombinedTabnet dataset is the processed combination of PubTabNet and Fintabnet. (**) The combined dataset is the processed combination of PubTabNet, Fintabnet and TableBank.</caption></otsl>
<text><loc_252><loc_158><loc_445><loc_186>one adopts a colorful appearance with high contrast and the last one contains tables with sparse content. Lastly, we have combined all synthetic datasets into one big unified synthetic dataset of 600k examples.</text>
<text><loc_262><loc_188><loc_443><loc_194>Tab. 1 summarizes the various attributes of the datasets.</text>
<section_header_level_1><loc_252><loc_203><loc_364><loc_210>4. The TableFormer model</section_header_level_1>
<text><loc_252><loc_216><loc_445><loc_282>Given the image of a table, TableFormer is able to predict: 1) a sequence of tokens that represent the structure of a table, and 2) a bounding box coupled to a subset of those tokens. The conversion of an image into a sequence of tokens is a well-known task [35, 16]. While attention is often used as an implicit method to associate each token of the sequence with a position in the original image, an explicit association between the individual table-cells and the image bounding boxes is also required.</text>
<section_header_level_1><loc_252><loc_289><loc_343><loc_295>4.1. Model architecture.</section_header_level_1>
<text><loc_252><loc_301><loc_445><loc_420>We now describe in detail the proposed method, which is composed of three main components, see Fig. 4. Our CNN Backbone Network encodes the input as a feature vector of predefined length. The input feature vector of the encoded image is passed to the Structure Decoder to produce a sequence of HTML tags that represent the structure of the table. With each prediction of an HTML standard data cell (' < td > ') the hidden state of that cell is passed to the Cell BBox Decoder. As for spanning cells, such as row or column span, the tag is broken down to ' < ', 'rowspan=' or 'colspan=', with the number of spanning cells (attribute), and ' > '. The hidden state attached to ' < ' is passed to the Cell BBox Decoder. A shared feed forward network (FFN) receives the hidden states from the Structure Decoder, to provide the final detection predictions of the bounding box coordinates and their classification.</text>
<text><loc_252><loc_422><loc_445><loc_450>CNN Backbone Network. A ResNet-18 CNN is the backbone that receives the table image and encodes it as a vector of predefined length. The network has been modified by removing the linear and pooling layer, as we are not per-</text>
<page_break>
<picture><loc_61><loc_49><loc_425><loc_116><caption><loc_41><loc_129><loc_445><loc_142>Figure 3: TableFormer takes in an image of the PDF and creates bounding box and HTML structure predictions that are synchronized. The bounding boxes grab the content from the PDF and insert it into the structure.</caption></picture>
<picture><loc_43><loc_163><loc_233><loc_320><caption><loc_41><loc_333><loc_234><loc_429>Figure 4: Given an input image of a table, the Encoder produces fixed-length features that represent the input image. The features are then passed to both the Structure Decoder and Cell BBox Decoder . During training, the Structure Decoder receives 'tokenized tags' of the HTML code that represent the table structure. Afterwards, a transformer encoder and decoder architecture is employed to produce features that are received by a linear layer, and the Cell BBox Decoder. The linear layer is applied to the features to predict the tags. Simultaneously, the Cell BBox Decoder selects features referring to the data cells (' < td > ', ' < ') and passes them through an attention network, an MLP, and a linear layer to predict the bounding boxes.</caption></picture>
<text><loc_252><loc_158><loc_445><loc_186>forming classification, and adding an adaptive pooling layer of size 28*28. ResNet by default downsamples the image resolution by 32 and then the encoded image is provided to both the Structure Decoder , and Cell BBox Decoder .</text>
<text><loc_252><loc_188><loc_445><loc_261>Structure Decoder. The transformer architecture of this component is based on the work proposed in [31]. After extensive experimentation, the Structure Decoder is modeled as a transformer encoder with two encoder layers and a transformer decoder made from a stack of 4 decoder layers that comprise mainly of multi-head attention and feed forward layers. This configuration uses fewer layers and heads in comparison to networks applied to other problems (e.g. "Scene Understanding", "Image Captioning"), something which we relate to the simplicity of table images.</text>
<text><loc_252><loc_263><loc_445><loc_344>The transformer encoder receives an encoded image from the CNN Backbone Network and refines it through a multi-head dot-product attention layer, followed by a Feed Forward Network. During training, the transformer decoder receives as input the output feature produced by the transformer encoder, and the tokenized input of the HTML ground-truth tags. Using a stack of multi-head attention layers, different aspects of the tag sequence could be inferred. This is achieved by each attention head on a layer operating in a different subspace, and then combining altogether their attention score.</text>
<text><loc_252><loc_346><loc_445><loc_412>Cell BBox Decoder. Our architecture allows us to simultaneously predict HTML tags and bounding boxes for each table cell, end to end, without the need for a separate object detector. This approach is inspired by DETR [1], which employs a Transformer Encoder and Decoder that looks for a specific number of object queries (potential object detections). As our model utilizes a transformer architecture, the hidden states of the ' < td > ' and ' < ' HTML structure tags become the object queries.</text>
<text><loc_252><loc_414><loc_445><loc_450>The encoding generated by the CNN Backbone Network along with the features acquired for every data cell from the Transformer Decoder are then passed to the attention network. The attention network takes both inputs and learns to provide an attention weighted encoding. This weighted at-</text>
<page_footer><loc_241><loc_463><loc_245><loc_469>5</page_footer>
<page_break>
<text><loc_41><loc_47><loc_234><loc_98>tention encoding is then multiplied to the encoded image to produce a feature for each table cell. Notice that this is different than the typical object detection problem where imbalances between the number of detections and the amount of objects may exist. In our case, we know up front that the produced detections always match with the table cells in number and correspondence.</text>
<text><loc_41><loc_101><loc_234><loc_152>The output features for each table cell are then fed into the feed-forward network (FFN). The FFN consists of a Multi-Layer Perceptron (3 layers with ReLU activation function) that predicts the normalized coordinates for the bounding box of each table cell. Finally, the predicted bounding boxes are classified based on whether they are empty or not using a linear layer.</text>
<text><loc_41><loc_154><loc_234><loc_280>Loss Functions. We formulate a multi-task loss Eq. 2 to train our network. The Cross-Entropy loss (denoted as l$_{s}$ ) is used to train the Structure Decoder which predicts the structure tokens. As for the Cell BBox Decoder , it is trained with a combination of losses denoted as l$_{box}$ . l$_{box}$ consists of the generally used l$_{1}$ loss for object detection and the IoU loss ( l$_{iou}$ ) to be scale invariant as explained in [25]. In comparison to DETR, we do not use the Hungarian algorithm [15] to match the predicted bounding boxes with the ground-truth boxes, as we have already achieved a one-to-one match through two steps: 1) Our token input sequence is naturally ordered, therefore the hidden states of the table data cells are also in order when they are provided as input to the Cell BBox Decoder , and 2) Our bounding box generation mechanism (see Sec. 3) ensures a one-to-one mapping between the cell content and its bounding box for all post-processed datasets.</text>
<text><loc_41><loc_283><loc_234><loc_296>The loss used to train the TableFormer can be defined as follows:</text>
<formula><loc_102><loc_311><loc_234><loc_326></formula>
<text><loc_41><loc_335><loc_230><loc_341>where λ ∈ [0, 1], and λ$_{iou}$, λ$_{l1}$ ∈ ℝ are hyper-parameters.</text>
<section_header_level_1><loc_41><loc_351><loc_141><loc_358>5. Experimental Results</section_header_level_1>
<section_header_level_1><loc_41><loc_364><loc_146><loc_370>5.1. Implementation Details</section_header_level_1>
<text><loc_41><loc_376><loc_234><loc_404>TableFormer uses ResNet-18 as the CNN Backbone Network . The input images are resized to 448*448 pixels and the feature map has a dimension of 28*28. Additionally, we enforce the following input constraints:</text>
<formula><loc_75><loc_413><loc_234><loc_428></formula>
<text><loc_41><loc_437><loc_234><loc_450>Although input constraints are used also by other methods, such as EDD, ours are less restrictive due to the improved</text>
<page_footer><loc_241><loc_463><loc_245><loc_469>6</page_footer>
<text><loc_252><loc_47><loc_445><loc_68>runtime performance and lower memory footprint of TableFormer. This allows us to utilize input samples with longer sequences and images with larger dimensions.</text>
<text><loc_252><loc_73><loc_445><loc_207>The Transformer Encoder consists of two "Transformer Encoder Layers", with an input feature size of 512, feed forward network of 1024, and 4 attention heads. As for the Transformer Decoder it is composed of four "Transformer Decoder Layers" with similar input and output dimensions as the "Transformer Encoder Layers". Even though our model uses fewer layers and heads than the default implementation parameters, our extensive experimentation has proved this setup to be more suitable for table images. We attribute this finding to the inherent design of table images, which contain mostly lines and text, unlike the more elaborate content present in other scopes (e.g. the COCO dataset). Moreover, we have added ResNet blocks to the inputs of the Structure Decoder and Cell BBox Decoder. This prevents a decoder having a stronger influence over the learned weights which would damage the other prediction task (structure vs bounding boxes), but learn task specific weights instead. Lastly our dropout layers are set to 0.5.</text>
<text><loc_252><loc_212><loc_445><loc_271>For training, TableFormer is trained with 3 Adam optimizers, each one for the CNN Backbone Network , Structure Decoder , and Cell BBox Decoder . Taking the PubTabNet as an example for our parameter set up, the initializing learning rate is 0.001 for 12 epochs with a batch size of 24, and λ set to 0.5. Afterwards, we reduce the learning rate to 0.0001, the batch size to 18 and train for 12 more epochs or convergence.</text>
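A schematic of this two-stage schedule with three separate Adam optimizers might look as follows; the `nn.Linear` stand-ins are placeholders for the real backbone and decoders.

```python
import torch
import torch.nn as nn

# Placeholders so the sketch runs; in practice these are the CNN Backbone
# Network, the Structure Decoder and the Cell BBox Decoder.
backbone = nn.Linear(8, 8)
structure_decoder = nn.Linear(8, 8)
bbox_decoder = nn.Linear(8, 8)

optimizers = [torch.optim.Adam(m.parameters(), lr=1e-3)
              for m in (backbone, structure_decoder, bbox_decoder)]

for epoch in range(24):
    if epoch == 12:                      # second stage: lower the learning rate
        for opt in optimizers:
            for group in opt.param_groups:
                group["lr"] = 1e-4
    # ... iterate over batches (size 24, then 18), compute the loss with
    # lambda = 0.5, and call opt.step() on each optimizer
```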
<text><loc_252><loc_276><loc_445><loc_350>TableFormer is implemented with PyTorch and Torchvision libraries [22]. To speed up the inference, the image undergoes a single forward pass through the CNN Backbone Network and transformer encoder. This eliminates the overhead of generating the same features for each decoding step. Similarly, we employ a 'caching' technique to perform faster autoregressive decoding. This is achieved by storing the features of decoded tokens so we can reuse them for each time step. Therefore, we only compute the attention for each new tag.</text>
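The caching idea can be sketched as below; all module names are illustrative, and a production implementation would more likely cache attention keys/values than raw tag embeddings.

```python
import torch

def cached_greedy_decode(backbone, encoder, decoder, embed, head,
                         image, start_id=1, end_id=2, max_len=512):
    # The image features are computed once and reused for every decoding step.
    memory = encoder(backbone(image))
    tokens, cache = [start_id], []
    for _ in range(max_len):
        # Only the newest tag is embedded; features of earlier tags are cached.
        cache.append(embed(torch.tensor([tokens[-1]])))
        tgt = torch.cat(cache, dim=0)
        logits = head(decoder(tgt, memory))[-1]
        tokens.append(int(logits.argmax()))
        if tokens[-1] == end_id:
            break
    return tokens
```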
<section_header_level_1><loc_252><loc_366><loc_325><loc_372>5.2. Generalization</section_header_level_1>
<text><loc_252><loc_381><loc_445><loc_424>TableFormer is evaluated on three major publicly available datasets of different nature to prove the generalization and effectiveness of our model. The datasets used for evaluation are the PubTabNet, FinTabNet and TableBank which stem from the scientific, financial and general domains respectively.</text>
<text><loc_252><loc_430><loc_445><loc_450>We also share our baseline results on the challenging SynthTabNet dataset. Throughout our experiments, the same parameters stated in Sec. 5.1 are utilized.</text>
<page_break>
<section_header_level_1><loc_41><loc_47><loc_137><loc_53>5.3. Datasets and Metrics</section_header_level_1>
<text><loc_41><loc_59><loc_234><loc_87>The Tree-Edit-Distance-Based Similarity (TEDS) metric was introduced in [37]. It represents the prediction, and ground-truth as a tree structure of HTML tags. This similarity is calculated as:</text>
<formula><loc_70><loc_95><loc_234><loc_109></formula>
<text><loc_41><loc_114><loc_234><loc_135>where T$_{a}$ and T$_{b}$ represent tables in tree structure HTML format. EditDist denotes the tree-edit distance, and | T | represents the number of nodes in T .</text>
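Under that definition, TEDS reduces to a one-liner once a tree edit distance is available; `tree_edit_distance` and `num_nodes` below are assumed helpers (the reference TEDS code uses the APTED algorithm).

```python
def teds(tree_a, tree_b, tree_edit_distance, num_nodes):
    # TEDS(Ta, Tb) = 1 - EditDist(Ta, Tb) / max(|Ta|, |Tb|)
    dist = tree_edit_distance(tree_a, tree_b)
    return 1.0 - dist / max(num_nodes(tree_a), num_nodes(tree_b))
```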
<section_header_level_1><loc_41><loc_142><loc_139><loc_148>5.4. Quantitative Analysis</section_header_level_1>
<text><loc_41><loc_154><loc_234><loc_250>Structure. As shown in Tab. 2, TableFormer outperforms all SOTA methods across different datasets by a large margin for predicting the table structure from an image. All the more, our model outperforms pre-trained methods. During the evaluation we do not apply any table filtering. We also provide our baseline results on the SynthTabNet dataset. It has been observed that large tables (e.g. tables that occupy half of the page or more) yield poor predictions. We attribute this issue to the image resizing during the preprocessing step, that produces downsampled images with indistinguishable features. This problem can be addressed by treating such big tables with a separate model which accepts a large input image size.</text>
<otsl><loc_44><loc_258><loc_231><loc_368><ched>Model<ched>Dataset<ched>Simple<ched>TEDS Complex<ched>All<nl><rhed>EDD<fcel>PTN<fcel>91.1<fcel>88.7<fcel>89.9<nl><rhed>GTE<fcel>PTN<fcel>-<fcel>-<fcel>93.01<nl><rhed>TableFormer<fcel>PTN<fcel>98.5<fcel>95.0<fcel>96.75<nl><rhed>EDD<fcel>FTN<fcel>88.4<fcel>92.08<fcel>90.6<nl><rhed>GTE<fcel>FTN<fcel>-<fcel>-<fcel>87.14<nl><rhed>GTE (FT)<fcel>FTN<fcel>-<fcel>-<fcel>91.02<nl><rhed>TableFormer<fcel>FTN<fcel>97.5<fcel>96.0<fcel>96.8<nl><rhed>EDD<fcel>TB<fcel>86.0<fcel>-<fcel>86.0<nl><rhed>TableFormer<fcel>TB<fcel>89.6<fcel>-<fcel>89.6<nl><rhed>TableFormer<fcel>STN<fcel>96.9<fcel>95.7<fcel>96.7<nl><caption><loc_41><loc_374><loc_234><loc_387>Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN).</caption></otsl>
<text><loc_41><loc_389><loc_214><loc_395>FT: Model was trained on PubTabNet then finetuned.</text>
<text><loc_41><loc_407><loc_234><loc_450>Cell Detection. Like any object detector, our Cell BBox Detector provides bounding boxes that can be improved with post-processing during inference. We make use of the grid-like structure of tables to refine the predictions. A detailed explanation on the post-processing is available in the supplementary material. As shown in Tab. 3, we evaluate</text>
<page_footer><loc_241><loc_463><loc_245><loc_469>7</page_footer>
<text><loc_252><loc_47><loc_445><loc_144>our Cell BBox Decoder accuracy for cells with a class label of 'content' only using the PASCAL VOC mAP metric for pre-processing and post-processing. Note that we do not have post-processing results for SynthTabNet as images are only provided. To compare the performance of our proposed approach, we've integrated TableFormer's Cell BBox Decoder into EDD architecture. As mentioned previously, the Structure Decoder provides the Cell BBox Decoder with the features needed to predict the bounding box predictions. Therefore, the accuracy of the Structure Decoder directly influences the accuracy of the Cell BBox Decoder . If the Structure Decoder predicts an extra column, this will result in an extra column of predicted bounding boxes.</text>
<otsl><loc_252><loc_156><loc_436><loc_192><ched>Model<ched>Dataset<ched>mAP<ched>mAP (PP)<nl><fcel>EDD+BBox<fcel>PubTabNet<fcel>79.2<fcel>82.7<nl><fcel>TableFormer<fcel>PubTabNet<fcel>82.1<fcel>86.8<nl><fcel>TableFormer<fcel>SynthTabNet<fcel>87.7<fcel>-<nl><caption><loc_252><loc_200><loc_445><loc_213>Table 3: Cell Bounding Box detection results on PubTabNet, and FinTabNet. PP: Post-processing.</caption></otsl>
<text><loc_252><loc_232><loc_445><loc_328>Cell Content. In this section, we evaluate the entire pipeline of recovering a table with content. Here we put our approach to test by capitalizing on extracting content from the PDF cells rather than decoding from images. Tab. 4 shows the TEDs score of HTML code representing the structure of the table along with the content inserted in the data cell and compared with the ground-truth. Our method achieved a 5.3% increase over the state-of-the-art, and commercial solutions. We believe our scores would be higher if the HTML ground-truth matched the extracted PDF cell content. Unfortunately, there are small discrepancies such as spacings around words or special characters with various unicode representations.</text>
<otsl><loc_272><loc_341><loc_426><loc_406><fcel>Model<ched>Simple<ched>TEDS Complex<ched>All<nl><rhed>Tabula<fcel>78.0<fcel>57.8<fcel>67.9<nl><rhed>Traprange<fcel>60.8<fcel>49.9<fcel>55.4<nl><rhed>Camelot<fcel>80.0<fcel>66.0<fcel>73.0<nl><rhed>Acrobat Pro<fcel>68.9<fcel>61.8<fcel>65.3<nl><rhed>EDD<fcel>91.2<fcel>85.4<fcel>88.3<nl><rhed>TableFormer<fcel>95.4<fcel>90.1<fcel>93.6<nl><caption><loc_252><loc_415><loc_445><loc_435>Table 4: Results of structure with content retrieved using cell detection on PubTabNet. In all cases the input is PDF documents with cropped tables.</caption></otsl>
<unordered_list><page_break>
<list_item><loc_44><loc_50><loc_50><loc_55>a.</list_item>
<list_item><loc_54><loc_50><loc_408><loc_55>Red - PDF cells, Green - predicted bounding boxes, Blue - post-processed predictions matched to PDF cells</list_item>
</unordered_list>
<section_header_level_1><location><page_8><loc_9><loc_87><loc_46><loc_88></location>Japanese language (previously unseen by TableFormer):</section_header_level_1>
<section_header_level_1><location><page_8><loc_50><loc_87><loc_70><loc_88></location>Example table from FinTabNet:</section_header_level_1>
<figure>
<location><page_8><loc_8><loc_76><loc_49><loc_87></location>
</figure>
<figure>
<location><page_8><loc_50><loc_77><loc_91><loc_88></location>
<caption>b. Structure predicted by TableFormer, with superimposed matched PDF cell text:</caption>
</figure>
<table>
<location><page_8><loc_9><loc_63><loc_49><loc_72></location>
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>論文ファイル</col_2><col_3><col_header>論文ファイル</col_3><col_4><col_header>参考文献</col_4><col_5><col_header>参考文献</col_5></row_0>
<row_1><col_0><col_header>出典</col_0><col_1><col_header>ファイル 数</col_1><col_2><col_header>英語</col_2><col_3><col_header>日本語</col_3><col_4><col_header>英語</col_4><col_5><col_header>日本語</col_5></row_1>
<row_2><col_0><row_header>Association for Computational Linguistics(ACL2003)</col_0><col_1><body>65</col_1><col_2><body>65</col_2><col_3><body>0</col_3><col_4><body>150</col_4><col_5><body>0</col_5></row_2>
<row_3><col_0><row_header>Computational Linguistics(COLING2002)</col_0><col_1><body>140</col_1><col_2><body>140</col_2><col_3><body>0</col_3><col_4><body>150</col_4><col_5><body>0</col_5></row_3>
<row_4><col_0><row_header>電気情報通信学会 2003 年総合大会</col_0><col_1><body>150</col_1><col_2><body>8</col_2><col_3><body>142</col_3><col_4><body>223</col_4><col_5><body>147</col_5></row_4>
<row_5><col_0><row_header>情報処理学会第 65 回全国大会 (2003)</col_0><col_1><body>177</col_1><col_2><body>1</col_2><col_3><body>176</col_3><col_4><body>150</col_4><col_5><body>236</col_5></row_5>
<row_6><col_0><row_header>第 17 回人工知能学会全国大会 (2003)</col_0><col_1><body>208</col_1><col_2><body>5</col_2><col_3><body>203</col_3><col_4><body>152</col_4><col_5><body>244</col_5></row_6>
<row_7><col_0><row_header>自然言語処理研究会第 146 〜 155 回</col_0><col_1><body>98</col_1><col_2><body>2</col_2><col_3><body>96</col_3><col_4><body>150</col_4><col_5><body>232</col_5></row_7>
<row_8><col_0><row_header>WWW から収集した論文</col_0><col_1><body>107</col_1><col_2><body>73</col_2><col_3><body>34</col_3><col_4><body>147</col_4><col_5><body>96</col_5></row_8>
<row_9><col_0><body></col_0><col_1><body>945</col_1><col_2><body>294</col_2><col_3><body>651</col_3><col_4><body>1122</col_4><col_5><body>955</col_5></row_9>
</table>
<table>
<location><page_8><loc_50><loc_64><loc_90><loc_72></location>
<caption>Text is aligned to match original for ease of viewing</caption>
<row_0><col_0><body></col_0><col_1><col_header>Shares (in millions)</col_1><col_2><col_header>Shares (in millions)</col_2><col_3><col_header>Weighted Average Grant Date Fair Value</col_3><col_4><col_header>Weighted Average Grant Date Fair Value</col_4></row_0>
<row_1><col_0><body></col_0><col_1><col_header>RS U s</col_1><col_2><col_header>PSUs</col_2><col_3><col_header>RSUs</col_3><col_4><col_header>PSUs</col_4></row_1>
<row_2><col_0><row_header>Nonvested on Janua ry 1</col_0><col_1><body>1. 1</col_1><col_2><body>0.3</col_2><col_3><body>90.10 $</col_3><col_4><body>$ 91.19</col_4></row_2>
<row_3><col_0><row_header>Granted</col_0><col_1><body>0. 5</col_1><col_2><body>0.1</col_2><col_3><body>117.44</col_3><col_4><body>122.41</col_4></row_3>
<row_4><col_0><row_header>Vested</col_0><col_1><body>(0. 5 )</col_1><col_2><body>(0.1)</col_2><col_3><body>87.08</col_3><col_4><body>81.14</col_4></row_4>
<row_5><col_0><row_header>Canceled or forfeited</col_0><col_1><body>(0. 1 )</col_1><col_2><body>-</col_2><col_3><body>102.01</col_3><col_4><body>92.18</col_4></row_5>
<row_6><col_0><row_header>Nonvested on December 31</col_0><col_1><body>1.0</col_1><col_2><body>0.3</col_2><col_3><body>104.85 $</col_3><col_4><body>$ 104.51</col_4></row_6>
</table>
<figure>
<location><page_8><loc_8><loc_44><loc_35><loc_52></location>
<caption>Figure 5: One of the benefits of TableFormer is that it is language agnostic, as an example, the left part of the illustration demonstrates TableFormer predictions on previously unseen language (Japanese). Additionally, we see that TableFormer is robust to variability in style and content, right side of the illustration shows the example of the TableFormer prediction from the FinTabNet dataset.</caption>
</figure>
<figure>
<location><page_8><loc_63><loc_44><loc_89><loc_52></location>
</figure>
<figure>
<location><page_8><loc_35><loc_44><loc_61><loc_52></location>
<caption>Figure 6: An example of TableFormer predictions (bounding boxes and structure) from generated SynthTabNet table.</caption>
</figure>
<section_header_level_1><location><page_8><loc_8><loc_37><loc_27><loc_38></location>5.5. Qualitative Analysis</section_header_level_1>
<text><location><page_8><loc_8><loc_10><loc_47><loc_32></location>We showcase several visualizations for the different components of our network on various "complex" tables within datasets presented in this work in Fig. 5 and Fig. 6 As it is shown, our model is able to predict bounding boxes for all table cells, even for the empty ones. Additionally, our post-processing techniques can extract the cell content by matching the predicted bounding boxes to the PDF cells based on their overlap and spatial proximity. The left part of Fig. 5 demonstrates also the adaptability of our method to any language, as it can successfully extract Japanese text, although the training set contains only English content. We provide more visualizations including the intermediate steps in the supplementary material. Overall these illustrations justify the versatility of our method across a diverse range of table appearances and content type.</text>
<section_header_level_1><location><page_8><loc_50><loc_37><loc_75><loc_38></location>6. Future Work & Conclusion</section_header_level_1>
<text><location><page_8><loc_50><loc_18><loc_89><loc_35></location>In this paper, we presented TableFormer an end-to-end transformer based approach to predict table structures and bounding boxes of cells from an image. This approach enables us to recreate the table structure, and extract the cell content from PDF or OCR by using bounding boxes. Additionally, it provides the versatility required in real-world scenarios when dealing with various types of PDF documents, and languages. Furthermore, our method outperforms all state-of-the-arts with a wide margin. Finally, we introduce "SynthTabNet" a challenging synthetically generated dataset that reinforces missing characteristics from other datasets.</text>
<section_header_level_1><location><page_8><loc_50><loc_14><loc_60><loc_15></location>References</section_header_level_1>
<unordered_list>
<list_item><location><page_8><loc_51><loc_10><loc_89><loc_12></location>[1] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-</list_item>
<section_header_level_1><loc_44><loc_60><loc_232><loc_64>Japanese language (previously unseen by TableFormer):</section_header_level_1>
<section_header_level_1><loc_249><loc_60><loc_352><loc_64>Example table from FinTabNet:</section_header_level_1>
<picture><loc_41><loc_65><loc_246><loc_118></picture>
<picture><loc_250><loc_62><loc_453><loc_114><caption><loc_44><loc_131><loc_315><loc_136>b. Structure predicted by TableFormer, with superimposed matched PDF cell text:</caption></picture>
<otsl><loc_44><loc_138><loc_244><loc_185><ecel><ecel><ched>論文ファイル<lcel><ched>参考文献<lcel><nl><ched>出典<ched>ファイル 数<ched>英語<ched>日本語<ched>英語<ched>日本語<nl><rhed>Association for Computational Linguistics(ACL2003)<fcel>65<fcel>65<fcel>0<fcel>150<fcel>0<nl><rhed>Computational Linguistics(COLING2002)<fcel>140<fcel>140<fcel>0<fcel>150<fcel>0<nl><rhed>電気情報通信学会 2003 年総合大会<fcel>150<fcel>8<fcel>142<fcel>223<fcel>147<nl><rhed>情報処理学会第 65 回全国大会 (2003)<fcel>177<fcel>1<fcel>176<fcel>150<fcel>236<nl><rhed>第 17 回人工知能学会全国大会 (2003)<fcel>208<fcel>5<fcel>203<fcel>152<fcel>244<nl><rhed>自然言語処理研究会第 146 〜 155 回<fcel>98<fcel>2<fcel>96<fcel>150<fcel>232<nl><rhed>WWW から収集した論文<fcel>107<fcel>73<fcel>34<fcel>147<fcel>96<nl><ecel><fcel>945<fcel>294<fcel>651<fcel>1122<fcel>955<nl></otsl>
<otsl><loc_249><loc_138><loc_450><loc_182><ecel><ched>Shares (in millions)<lcel><ched>Weighted Average Grant Date Fair Value<lcel><nl><ecel><ched>RS U s<ched>PSUs<ched>RSUs<ched>PSUs<nl><rhed>Nonvested on Janua ry 1<fcel>1. 1<fcel>0.3<fcel>90.10 $<fcel>$ 91.19<nl><rhed>Granted<fcel>0. 5<fcel>0.1<fcel>117.44<fcel>122.41<nl><rhed>Vested<fcel>(0. 5 )<fcel>(0.1)<fcel>87.08<fcel>81.14<nl><rhed>Canceled or forfeited<fcel>(0. 1 )<fcel>-<fcel>102.01<fcel>92.18<nl><rhed>Nonvested on December 31<fcel>1.0<fcel>0.3<fcel>104.85 $<fcel>$ 104.51<nl><caption><loc_311><loc_185><loc_449><loc_189>Text is aligned to match original for ease of viewing</caption></otsl>
<picture><loc_42><loc_240><loc_173><loc_280><caption><loc_41><loc_203><loc_445><loc_231>Figure 5: One of the benefits of TableFormer is that it is language agnostic, as an example, the left part of the illustration demonstrates TableFormer predictions on previously unseen language (Japanese). Additionally, we see that TableFormer is robust to variability in style and content, right side of the illustration shows the example of the TableFormer prediction from the FinTabNet dataset.</caption></picture>
<picture><loc_313><loc_241><loc_443><loc_280></picture>
<picture><loc_177><loc_240><loc_307><loc_280><caption><loc_51><loc_290><loc_435><loc_295>Figure 6: An example of TableFormer predictions (bounding boxes and structure) from generated SynthTabNet table.</caption></picture>
<section_header_level_1><loc_41><loc_310><loc_134><loc_316>5.5. Qualitative Analysis</section_header_level_1>
<text><loc_41><loc_339><loc_234><loc_450>We showcase several visualizations for the different components of our network on various "complex" tables within datasets presented in this work in Fig. 5 and Fig. 6 As it is shown, our model is able to predict bounding boxes for all table cells, even for the empty ones. Additionally, our post-processing techniques can extract the cell content by matching the predicted bounding boxes to the PDF cells based on their overlap and spatial proximity. The left part of Fig. 5 demonstrates also the adaptability of our method to any language, as it can successfully extract Japanese text, although the training set contains only English content. We provide more visualizations including the intermediate steps in the supplementary material. Overall these illustrations justify the versatility of our method across a diverse range of table appearances and content type.</text>
<section_header_level_1><loc_252><loc_310><loc_377><loc_317>6. Future Work & Conclusion</section_header_level_1>
<text><loc_252><loc_324><loc_445><loc_412>In this paper, we presented TableFormer an end-to-end transformer based approach to predict table structures and bounding boxes of cells from an image. This approach enables us to recreate the table structure, and extract the cell content from PDF or OCR by using bounding boxes. Additionally, it provides the versatility required in real-world scenarios when dealing with various types of PDF documents, and languages. Furthermore, our method outperforms all state-of-the-arts with a wide margin. Finally, we introduce "SynthTabNet" a challenging synthetically generated dataset that reinforces missing characteristics from other datasets.</text>
<section_header_level_1><loc_252><loc_424><loc_298><loc_431>References</section_header_level_1>
<unordered_list><list_item><loc_256><loc_438><loc_445><loc_450>[1] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-</list_item>
</unordered_list>
<unordered_list>
<list_item><location><page_9><loc_11><loc_85><loc_47><loc_90></location>end object detection with transformers. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020 , pages 213-229, Cham, 2020. Springer International Publishing. 5</list_item>
<list_item><location><page_9><loc_9><loc_81><loc_47><loc_85></location>[2] Zewen Chi, Heyan Huang, Heng-Da Xu, Houjin Yu, Wanxuan Yin, and Xian-Ling Mao. Complicated table structure recognition. arXiv preprint arXiv:1908.04729 , 2019. 3</list_item>
<list_item><location><page_9><loc_9><loc_77><loc_47><loc_81></location>[3] Bertrand Couasnon and Aurelie Lemaitre. Recognition of Tables and Forms , pages 647-677. Springer London, London, 2014. 2</list_item>
<list_item><location><page_9><loc_9><loc_71><loc_47><loc_76></location>[4] Herv'e D'ejean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), Apr. 2019. http://sac.founderit.com/. 2</list_item>
<list_item><location><page_9><loc_9><loc_66><loc_47><loc_71></location>[5] Basilios Gatos, Dimitrios Danatsas, Ioannis Pratikakis, and Stavros J Perantonis. Automatic table detection in document images. In International Conference on Pattern Recognition and Image Analysis , pages 609-618. Springer, 2005. 2</list_item>
<list_item><location><page_9><loc_9><loc_60><loc_47><loc_65></location>[6] Max Gobel, Tamir Hassan, Ermelinda Oro, and Giorgio Orsi. Icdar 2013 table competition. In 2013 12th International Conference on Document Analysis and Recognition , pages 1449-1453, 2013. 2</list_item>
<list_item><location><page_9><loc_9><loc_56><loc_47><loc_60></location>[7] EA Green and M Krishnamoorthy. Recognition of tables using table grammars. procs. In Symposium on Document Analysis and Recognition (SDAIR'95) , pages 261-277. 2</list_item>
<list_item><location><page_9><loc_9><loc_49><loc_47><loc_56></location>[8] Khurram Azeem Hashmi, Alain Pagani, Marcus Liwicki, Didier Stricker, and Muhammad Zeshan Afzal. Castabdetectors: Cascade network for table detection in document images with recursive feature pyramid and switchable atrous convolution. Journal of Imaging , 7(10), 2021. 1</list_item>
<list_item><location><page_9><loc_9><loc_45><loc_47><loc_49></location>[9] Kaiming He, Georgia Gkioxari, Piotr Dollar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE International Conference on Computer Vision (ICCV) , Oct 2017. 1</list_item>
<list_item><location><page_9><loc_8><loc_39><loc_47><loc_44></location>[10] Yelin He, X. Qi, Jiaquan Ye, Peng Gao, Yihao Chen, Bingcong Li, Xin Tang, and Rong Xiao. Pingan-vcgroup's solution for icdar 2021 competition on scientific table image recognition to latex. ArXiv , abs/2105.01846, 2021. 2</list_item>
<list_item><location><page_9><loc_8><loc_32><loc_47><loc_39></location>[11] Jianying Hu, Ramanujan S Kashi, Daniel P Lopresti, and Gordon Wilfong. Medium-independent table detection. In Document Recognition and Retrieval VII , volume 3967, pages 291-302. International Society for Optics and Photonics, 1999. 2</list_item>
<list_item><location><page_9><loc_8><loc_25><loc_47><loc_32></location>[12] Matthew Hurst. A constraint-based approach to table structure derivation. In Proceedings of the Seventh International Conference on Document Analysis and Recognition - Volume 2 , ICDAR '03, page 911, USA, 2003. IEEE Computer Society. 2</list_item>
<list_item><location><page_9><loc_8><loc_18><loc_47><loc_25></location>[13] Thotreingam Kasar, Philippine Barlas, Sebastien Adam, Cl'ement Chatelain, and Thierry Paquet. Learning to detect tables in scanned document images using line information. In 2013 12th International Conference on Document Analysis and Recognition , pages 1185-1189. IEEE, 2013. 2</list_item>
<list_item><location><page_9><loc_8><loc_14><loc_47><loc_18></location>[14] Pratik Kayal, Mrinal Anand, Harsh Desai, and Mayank Singh. Icdar 2021 competition on scientific table image recognition to latex, 2021. 2</list_item>
<list_item><location><page_9><loc_8><loc_10><loc_47><loc_14></location>[15] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly , 2(1-2):83-97, 1955. 6</list_item>
<page_footer><loc_241><loc_463><loc_245><loc_469>8</page_footer>
<unordered_list><page_break>
<list_item><loc_57><loc_48><loc_234><loc_74>end object detection with transformers. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020 , pages 213-229, Cham, 2020. Springer International Publishing. 5</list_item>
<list_item><loc_45><loc_76><loc_234><loc_95>[2] Zewen Chi, Heyan Huang, Heng-Da Xu, Houjin Yu, Wanxuan Yin, and Xian-Ling Mao. Complicated table structure recognition. arXiv preprint arXiv:1908.04729 , 2019. 3</list_item>
<list_item><loc_45><loc_97><loc_234><loc_116>[3] Bertrand Couasnon and Aurelie Lemaitre. Recognition of Tables and Forms , pages 647-677. Springer London, London, 2014. 2</list_item>
<list_item><loc_45><loc_118><loc_234><loc_143>[4] Herv'e D'ejean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), Apr. 2019. http://sac.founderit.com/. 2</list_item>
<list_item><loc_45><loc_146><loc_234><loc_171>[5] Basilios Gatos, Dimitrios Danatsas, Ioannis Pratikakis, and Stavros J Perantonis. Automatic table detection in document images. In International Conference on Pattern Recognition and Image Analysis , pages 609-618. Springer, 2005. 2</list_item>
<list_item><loc_45><loc_173><loc_234><loc_199>[6] Max Gobel, Tamir Hassan, Ermelinda Oro, and Giorgio Orsi. Icdar 2013 table competition. In 2013 12th International Conference on Document Analysis and Recognition , pages 1449-1453, 2013. 2</list_item>
<list_item><loc_45><loc_201><loc_234><loc_220>[7] EA Green and M Krishnamoorthy. Recognition of tables using table grammars. procs. In Symposium on Document Analysis and Recognition (SDAIR'95) , pages 261-277. 2</list_item>
<list_item><loc_45><loc_222><loc_234><loc_255>[8] Khurram Azeem Hashmi, Alain Pagani, Marcus Liwicki, Didier Stricker, and Muhammad Zeshan Afzal. Castabdetectors: Cascade network for table detection in document images with recursive feature pyramid and switchable atrous convolution. Journal of Imaging , 7(10), 2021. 1</list_item>
<list_item><loc_45><loc_257><loc_234><loc_276>[9] Kaiming He, Georgia Gkioxari, Piotr Dollar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE International Conference on Computer Vision (ICCV) , Oct 2017. 1</list_item>
<list_item><loc_41><loc_278><loc_234><loc_304>[10] Yelin He, X. Qi, Jiaquan Ye, Peng Gao, Yihao Chen, Bingcong Li, Xin Tang, and Rong Xiao. Pingan-vcgroup's solution for icdar 2021 competition on scientific table image recognition to latex. ArXiv , abs/2105.01846, 2021. 2</list_item>
<list_item><loc_41><loc_306><loc_234><loc_339>[11] Jianying Hu, Ramanujan S Kashi, Daniel P Lopresti, and Gordon Wilfong. Medium-independent table detection. In Document Recognition and Retrieval VII , volume 3967, pages 291-302. International Society for Optics and Photonics, 1999. 2</list_item>
<list_item><loc_41><loc_341><loc_234><loc_373>[12] Matthew Hurst. A constraint-based approach to table structure derivation. In Proceedings of the Seventh International Conference on Document Analysis and Recognition - Volume 2 , ICDAR '03, page 911, USA, 2003. IEEE Computer Society. 2</list_item>
<list_item><loc_41><loc_375><loc_234><loc_408>[13] Thotreingam Kasar, Philippine Barlas, Sebastien Adam, Cl'ement Chatelain, and Thierry Paquet. Learning to detect tables in scanned document images using line information. In 2013 12th International Conference on Document Analysis and Recognition , pages 1185-1189. IEEE, 2013. 2</list_item>
<list_item><loc_41><loc_410><loc_234><loc_429>[14] Pratik Kayal, Mrinal Anand, Harsh Desai, and Mayank Singh. Icdar 2021 competition on scientific table image recognition to latex, 2021. 2</list_item>
<list_item><loc_41><loc_431><loc_234><loc_450>[15] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly , 2(1-2):83-97, 1955. 6</list_item>
</unordered_list>
<unordered_list>
<list_item><location><page_9><loc_50><loc_82><loc_89><loc_90></location>[16] Girish Kulkarni, Visruth Premraj, Vicente Ordonez, Sagnik Dhar, Siming Li, Yejin Choi, Alexander C. Berg, and Tamara L. Berg. Babytalk: Understanding and generating simple image descriptions. IEEE Transactions on Pattern Analysis and Machine Intelligence , 35(12):2891-2903, 2013. 4</list_item>
<list_item><location><page_9><loc_50><loc_78><loc_89><loc_82></location>[17] Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou, and Zhoujun Li. Tablebank: A benchmark dataset for table detection and recognition, 2019. 2, 3</list_item>
<list_item><location><page_9><loc_50><loc_67><loc_89><loc_78></location>[18] Yiren Li, Zheng Huang, Junchi Yan, Yi Zhou, Fan Ye, and Xianhui Liu. Gfte: Graph-based financial table extraction. In Alberto Del Bimbo, Rita Cucchiara, Stan Sclaroff, Giovanni Maria Farinella, Tao Mei, Marco Bertini, Hugo Jair Escalante, and Roberto Vezzani, editors, Pattern Recognition. ICPR International Workshops and Challenges , pages 644-658, Cham, 2021. Springer International Publishing. 2, 3</list_item>
<list_item><location><page_9><loc_50><loc_59><loc_89><loc_67></location>[19] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter Staar. Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence , 35(17):15137-15145, May 2021. 1</list_item>
<list_item><location><page_9><loc_50><loc_53><loc_89><loc_58></location>[20] Rujiao Long, Wen Wang, Nan Xue, Feiyu Gao, Zhibo Yang, Yongpan Wang, and Gui-Song Xia. Parsing table structures in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision , pages 944-952, 2021. 2</list_item>
<list_item><location><page_9><loc_50><loc_45><loc_89><loc_53></location>[21] Shubham Singh Paliwal, D Vishwanath, Rohit Rahul, Monika Sharma, and Lovekesh Vig. Tablenet: Deep learning model for end-to-end table detection and tabular data extraction from scanned document images. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 128-133. IEEE, 2019. 1</list_item>
<list_item><location><page_9><loc_50><loc_30><loc_89><loc_44></location>[22] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch'e-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32 , pages 8024-8035. Curran Associates, Inc., 2019. 6</list_item>
<list_item><location><page_9><loc_50><loc_21><loc_89><loc_29></location>[23] Devashish Prasad, Ayan Gadpal, Kshitij Kapadni, Manish Visave, and Kavita Sultanpure. Cascadetabnet: An approach for end to end table detection and structure recognition from image-based documents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops , pages 572-573, 2020. 1</list_item>
<list_item><location><page_9><loc_50><loc_16><loc_89><loc_21></location>[24] Shah Rukh Qasim, Hassan Mahmood, and Faisal Shafait. Rethinking table recognition using graph neural networks. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 142-147. IEEE, 2019. 3</list_item>
<list_item><location><page_9><loc_50><loc_10><loc_89><loc_15></location>[25] Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, Amir Sadeghian, Ian Reid, and Silvio Savarese. Generalized intersection over union: A metric and a loss for bounding box regression. In Proceedings of the IEEE/CVF Conference on</list_item>
<page_footer><loc_241><loc_463><loc_245><loc_469>9</page_footer>
<unordered_list><list_item><loc_252><loc_48><loc_445><loc_88>[16] Girish Kulkarni, Visruth Premraj, Vicente Ordonez, Sagnik Dhar, Siming Li, Yejin Choi, Alexander C. Berg, and Tamara L. Berg. Babytalk: Understanding and generating simple image descriptions. IEEE Transactions on Pattern Analysis and Machine Intelligence , 35(12):2891-2903, 2013. 4</list_item>
<list_item><loc_252><loc_90><loc_445><loc_109>[17] Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou, and Zhoujun Li. Tablebank: A benchmark dataset for table detection and recognition, 2019. 2, 3</list_item>
<list_item><loc_252><loc_111><loc_445><loc_164>[18] Yiren Li, Zheng Huang, Junchi Yan, Yi Zhou, Fan Ye, and Xianhui Liu. Gfte: Graph-based financial table extraction. In Alberto Del Bimbo, Rita Cucchiara, Stan Sclaroff, Giovanni Maria Farinella, Tao Mei, Marco Bertini, Hugo Jair Escalante, and Roberto Vezzani, editors, Pattern Recognition. ICPR International Workshops and Challenges , pages 644-658, Cham, 2021. Springer International Publishing. 2, 3</list_item>
<list_item><loc_252><loc_166><loc_445><loc_206>[19] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter Staar. Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence , 35(17):15137-15145, May 2021. 1</list_item>
<list_item><loc_252><loc_208><loc_445><loc_234>[20] Rujiao Long, Wen Wang, Nan Xue, Feiyu Gao, Zhibo Yang, Yongpan Wang, and Gui-Song Xia. Parsing table structures in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision , pages 944-952, 2021. 2</list_item>
<list_item><loc_252><loc_236><loc_445><loc_276>[21] Shubham Singh Paliwal, D Vishwanath, Rohit Rahul, Monika Sharma, and Lovekesh Vig. Tablenet: Deep learning model for end-to-end table detection and tabular data extraction from scanned document images. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 128-133. IEEE, 2019. 1</list_item>
<list_item><loc_252><loc_278><loc_445><loc_352>[22] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch'e-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32 , pages 8024-8035. Curran Associates, Inc., 2019. 6</list_item>
<list_item><loc_252><loc_354><loc_445><loc_394>[23] Devashish Prasad, Ayan Gadpal, Kshitij Kapadni, Manish Visave, and Kavita Sultanpure. Cascadetabnet: An approach for end to end table detection and structure recognition from image-based documents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops , pages 572-573, 2020. 1</list_item>
<list_item><loc_252><loc_396><loc_445><loc_422>[24] Shah Rukh Qasim, Hassan Mahmood, and Faisal Shafait. Rethinking table recognition using graph neural networks. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 142-147. IEEE, 2019. 3</list_item>
<list_item><loc_252><loc_424><loc_445><loc_450>[25] Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, Amir Sadeghian, Ian Reid, and Silvio Savarese. Generalized intersection over union: A metric and a loss for bounding box regression. In Proceedings of the IEEE/CVF Conference on</list_item>
</unordered_list>
<text><location><page_10><loc_11><loc_88><loc_47><loc_90></location>Computer Vision and Pattern Recognition , pages 658-666, 2019. 6</text>
<unordered_list>
<list_item><location><page_10><loc_8><loc_80><loc_47><loc_88></location>[26] Sebastian Schreiber, Stefan Agne, Ivo Wolf, Andreas Dengel, and Sheraz Ahmed. Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR) , volume 01, pages 11621167, 2017. 1</list_item>
<list_item><location><page_10><loc_8><loc_71><loc_47><loc_79></location>[27] Sebastian Schreiber, Stefan Agne, Ivo Wolf, Andreas Dengel, and Sheraz Ahmed. Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In 2017 14th IAPR international conference on document analysis and recognition (ICDAR) , volume 1, pages 1162-1167. IEEE, 2017. 3</list_item>
<list_item><location><page_10><loc_8><loc_66><loc_47><loc_71></location>[28] Faisal Shafait and Ray Smith. Table detection in heterogeneous documents. In Proceedings of the 9th IAPR International Workshop on Document Analysis Systems , pages 6572, 2010. 2</list_item>
<list_item><location><page_10><loc_8><loc_59><loc_47><loc_65></location>[29] Shoaib Ahmed Siddiqui, Imran Ali Fateh, Syed Tahseen Raza Rizvi, Andreas Dengel, and Sheraz Ahmed. Deeptabstr: Deep learning based table structure recognition. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1403-1409. IEEE, 2019. 3</list_item>
<list_item><location><page_10><loc_8><loc_52><loc_47><loc_58></location>[30] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD , KDD '18, pages 774-782, New York, NY, USA, 2018. ACM. 1</list_item>
<list_item><location><page_10><loc_8><loc_42><loc_47><loc_51></location>[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Ł ukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30 , pages 5998-6008. Curran Associates, Inc., 2017. 5</list_item>
<list_item><location><page_10><loc_8><loc_37><loc_47><loc_42></location>[32] Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. Show and tell: A neural image caption generator. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) , June 2015. 2</list_item>
<list_item><location><page_10><loc_8><loc_31><loc_47><loc_36></location>[33] Wenyuan Xue, Qingyong Li, and Dacheng Tao. Res2tim: reconstruct syntactic structures from table images. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 749-755. IEEE, 2019. 3</list_item>
<list_item><location><page_10><loc_8><loc_25><loc_47><loc_31></location>[34] Wenyuan Xue, Baosheng Yu, Wen Wang, Dacheng Tao, and Qingyong Li. Tgrnet: A table graph reconstruction network for table structure recognition. arXiv preprint arXiv:2106.10598 , 2021. 3</list_item>
<list_item><location><page_10><loc_8><loc_20><loc_47><loc_25></location>[35] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition , pages 4651-4659, 2016. 4</list_item>
<list_item><location><page_10><loc_8><loc_13><loc_47><loc_19></location>[36] Xinyi Zheng, Doug Burdick, Lucian Popa, Peter Zhong, and Nancy Xin Ru Wang. Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. Winter Conference for Applications in Computer Vision (WACV) , 2021. 2, 3</list_item>
<list_item><location><page_10><loc_8><loc_10><loc_47><loc_12></location>[37] Xu Zhong, Elaheh ShafieiBavani, and Antonio Jimeno Yepes. Image-based table recognition: Data, model,</list_item>
<page_break>
<text><loc_57><loc_48><loc_234><loc_60>Computer Vision and Pattern Recognition , pages 658-666, 2019. 6</text>
<unordered_list><list_item><loc_41><loc_62><loc_234><loc_102>[26] Sebastian Schreiber, Stefan Agne, Ivo Wolf, Andreas Dengel, and Sheraz Ahmed. Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR) , volume 01, pages 11621167, 2017. 1</list_item>
<list_item><loc_41><loc_104><loc_234><loc_143>[27] Sebastian Schreiber, Stefan Agne, Ivo Wolf, Andreas Dengel, and Sheraz Ahmed. Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In 2017 14th IAPR international conference on document analysis and recognition (ICDAR) , volume 1, pages 1162-1167. IEEE, 2017. 3</list_item>
<list_item><loc_41><loc_145><loc_234><loc_171>[28] Faisal Shafait and Ray Smith. Table detection in heterogeneous documents. In Proceedings of the 9th IAPR International Workshop on Document Analysis Systems , pages 6572, 2010. 2</list_item>
<list_item><loc_41><loc_173><loc_234><loc_206>[29] Shoaib Ahmed Siddiqui, Imran Ali Fateh, Syed Tahseen Raza Rizvi, Andreas Dengel, and Sheraz Ahmed. Deeptabstr: Deep learning based table structure recognition. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1403-1409. IEEE, 2019. 3</list_item>
<list_item><loc_41><loc_208><loc_234><loc_241>[30] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD , KDD '18, pages 774-782, New York, NY, USA, 2018. ACM. 1</list_item>
<list_item><loc_41><loc_243><loc_234><loc_290>[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Ł ukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30 , pages 5998-6008. Curran Associates, Inc., 2017. 5</list_item>
<list_item><loc_41><loc_292><loc_234><loc_317>[32] Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. Show and tell: A neural image caption generator. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) , June 2015. 2</list_item>
<list_item><loc_41><loc_320><loc_234><loc_345>[33] Wenyuan Xue, Qingyong Li, and Dacheng Tao. Res2tim: reconstruct syntactic structures from table images. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 749-755. IEEE, 2019. 3</list_item>
<list_item><loc_41><loc_347><loc_234><loc_373>[34] Wenyuan Xue, Baosheng Yu, Wen Wang, Dacheng Tao, and Qingyong Li. Tgrnet: A table graph reconstruction network for table structure recognition. arXiv preprint arXiv:2106.10598 , 2021. 3</list_item>
<list_item><loc_41><loc_375><loc_234><loc_401>[35] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition , pages 4651-4659, 2016. 4</list_item>
<list_item><loc_41><loc_403><loc_234><loc_436>[36] Xinyi Zheng, Doug Burdick, Lucian Popa, Peter Zhong, and Nancy Xin Ru Wang. Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. Winter Conference for Applications in Computer Vision (WACV) , 2021. 2, 3</list_item>
<list_item><loc_41><loc_438><loc_234><loc_450>[37] Xu Zhong, Elaheh ShafieiBavani, and Antonio Jimeno Yepes. Image-based table recognition: Data, model,</list_item>
</unordered_list>
<unordered_list>
<list_item><location><page_10><loc_54><loc_85><loc_89><loc_90></location>and evaluation. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision ECCV 2020 , pages 564-580, Cham, 2020. Springer International Publishing. 2, 3, 7</list_item>
<list_item><location><page_10><loc_50><loc_80><loc_89><loc_85></location>[38] Xu Zhong, Jianbin Tang, and Antonio Jimeno Yepes. Publaynet: Largest dataset ever for document layout analysis. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1015-1022, 2019. 1</list_item>
<page_footer><loc_239><loc_463><loc_247><loc_469>10</page_footer>
<unordered_list><list_item><loc_269><loc_48><loc_445><loc_74>and evaluation. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision ECCV 2020 , pages 564-580, Cham, 2020. Springer International Publishing. 2, 3, 7</list_item>
<list_item><loc_252><loc_76><loc_445><loc_102>[38] Xu Zhong, Jianbin Tang, and Antonio Jimeno Yepes. Publaynet: Largest dataset ever for document layout analysis. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1015-1022, 2019. 1</list_item>
</unordered_list>
<section_header_level_1><location><page_11><loc_22><loc_83><loc_76><loc_86></location>TableFormer: Table Structure Understanding with Transformers Supplementary Material</section_header_level_1>
<section_header_level_1><location><page_11><loc_8><loc_78><loc_29><loc_80></location>1. Details on the datasets</section_header_level_1>
<section_header_level_1><location><page_11><loc_8><loc_76><loc_25><loc_77></location>1.1. Data preparation</section_header_level_1>
<text><location><page_11><loc_8><loc_51><loc_47><loc_75></location>As a first step of our data preparation process, we have calculated statistics over the datasets across the following dimensions: (1) table size measured in the number of rows and columns, (2) complexity of the table, (3) strictness of the provided HTML structure and (4) completeness (i.e. no omitted bounding boxes). A table is considered to be simple if it does not contain row spans or column spans. Additionally, a table has a strict HTML structure if every row has the same number of columns after taking into account any row or column spans. Therefore a strict HTML structure looks always rectangular. However, HTML is a lenient encoding format, i.e. tables with rows of different sizes might still be regarded as correct due to implicit display rules. These implicit rules leave room for ambiguity, which we want to avoid. As such, we prefer to have "strict" tables, i.e. tables where every row has exactly the same length.</text>
<text><location><page_11><loc_8><loc_21><loc_47><loc_51></location>We have developed a technique that tries to derive a missing bounding box out of its neighbors. As a first step, we use the annotation data to generate the most fine-grained grid that covers the table structure. In case of strict HTML tables, all grid squares are associated with some table cell and in the presence of table spans a cell extends across multiple grid squares. When enough bounding boxes are known for a rectangular table, it is possible to compute the geometrical border lines between the grid rows and columns. Eventually this information is used to generate the missing bounding boxes. Additionally, the existence of unused grid squares indicates that the table rows have unequal number of columns and the overall structure is non-strict. The generation of missing bounding boxes for non-strict HTML tables is ambiguous and therefore quite challenging. Thus, we have decided to simply discard those tables. In case of PubTabNet we have computed missing bounding boxes for 48% of the simple and 69% of the complex tables. Regarding FinTabNet, 68% of the simple and 98% of the complex tables require the generation of bounding boxes.</text>
<text><location><page_11><loc_8><loc_18><loc_47><loc_20></location>Figure 7 illustrates the distribution of the tables across different dimensions per dataset.</text>
<section_header_level_1><location><page_11><loc_8><loc_15><loc_25><loc_16></location>1.2. Synthetic datasets</section_header_level_1>
<text><location><page_11><loc_8><loc_10><loc_47><loc_14></location>Aiming to train and evaluate our models in a broader spectrum of table data we have synthesized four types of datasets. Each one contains tables with different appear-</text>
<text><location><page_11><loc_50><loc_74><loc_89><loc_79></location>ances in regard to their size, structure, style and content. Every synthetic dataset contains 150k examples, summing up to 600k synthetic examples. All datasets are divided into Train, Test and Val splits (80%, 10%, 10%).</text>
<text><location><page_11><loc_50><loc_71><loc_89><loc_73></location>The process of generating a synthetic dataset can be decomposed into the following steps:</text>
<unordered_list>
<list_item><location><page_11><loc_50><loc_60><loc_89><loc_70></location>1. Prepare styling and content templates: The styling templates have been manually designed and organized into groups of scope specific appearances (e.g. financial data, marketing data, etc.) Additionally, we have prepared curated collections of content templates by extracting the most frequently used terms out of non-synthetic datasets (e.g. PubTabNet, FinTabNet, etc.).</list_item>
<list_item><location><page_11><loc_50><loc_43><loc_89><loc_60></location>2. Generate table structures: The structure of each synthetic dataset assumes a horizontal table header which potentially spans over multiple rows and a table body that may contain a combination of row spans and column spans. However, spans are not allowed to cross the header - body boundary. The table structure is described by the parameters: Total number of table rows and columns, number of header rows, type of spans (header only spans, row only spans, column only spans, both row and column spans), maximum span size and the ratio of the table area covered by spans.</list_item>
<list_item><location><page_11><loc_50><loc_37><loc_89><loc_43></location>3. Generate content: Based on the dataset theme , a set of suitable content templates is chosen first. Then, this content can be combined with purely random text to produce the synthetic content.</list_item>
<list_item><location><page_11><loc_50><loc_31><loc_89><loc_37></location>4. Apply styling templates: Depending on the domain of the synthetic dataset, a set of styling templates is first manually selected. Then, a style is randomly selected to format the appearance of the synthesized table.</list_item>
<list_item><location><page_11><loc_50><loc_23><loc_89><loc_31></location>5. Render the complete tables: The synthetic table is finally rendered by a web browser engine to generate the bounding boxes for each table cell. A batching technique is utilized to optimize the runtime overhead of the rendering process.</list_item>
<page_break>
<section_header_level_1><loc_109><loc_70><loc_380><loc_86>TableFormer: Table Structure Understanding with Transformers Supplementary Material</section_header_level_1>
<section_header_level_1><loc_41><loc_102><loc_144><loc_109>1. Details on the datasets</section_header_level_1>
<section_header_level_1><loc_41><loc_114><loc_123><loc_120>1.1. Data preparation</section_header_level_1>
<text><loc_41><loc_126><loc_234><loc_245>As a first step of our data preparation process, we have calculated statistics over the datasets across the following dimensions: (1) table size measured in the number of rows and columns, (2) complexity of the table, (3) strictness of the provided HTML structure and (4) completeness (i.e. no omitted bounding boxes). A table is considered to be simple if it does not contain row spans or column spans. Additionally, a table has a strict HTML structure if every row has the same number of columns after taking into account any row or column spans. Therefore a strict HTML structure looks always rectangular. However, HTML is a lenient encoding format, i.e. tables with rows of different sizes might still be regarded as correct due to implicit display rules. These implicit rules leave room for ambiguity, which we want to avoid. As such, we prefer to have "strict" tables, i.e. tables where every row has exactly the same length.</text>
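A minimal sketch of the strictness check described above, assuming each cell is represented only by its (rowspan, colspan) pair:

```python
def is_strict(rows):
    """rows: list of table rows; each cell is a (rowspan, colspan) tuple."""
    covered = [0] * len(rows)            # grid columns covered per row
    for r, row in enumerate(rows):
        for rowspan, colspan in row:
            for rr in range(r, min(r + rowspan, len(rows))):
                covered[rr] += colspan
    # strict (rectangular) if every row covers the same number of columns
    return len(set(covered)) == 1
```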
<text><loc_41><loc_247><loc_234><loc_396>We have developed a technique that tries to derive a missing bounding box out of its neighbors. As a first step, we use the annotation data to generate the most fine-grained grid that covers the table structure. In case of strict HTML tables, all grid squares are associated with some table cell and in the presence of table spans a cell extends across multiple grid squares. When enough bounding boxes are known for a rectangular table, it is possible to compute the geometrical border lines between the grid rows and columns. Eventually this information is used to generate the missing bounding boxes. Additionally, the existence of unused grid squares indicates that the table rows have unequal number of columns and the overall structure is non-strict. The generation of missing bounding boxes for non-strict HTML tables is ambiguous and therefore quite challenging. Thus, we have decided to simply discard those tables. In case of PubTabNet we have computed missing bounding boxes for 48% of the simple and 69% of the complex tables. Regarding FinTabNet, 68% of the simple and 98% of the complex tables require the generation of bounding boxes.</text>
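A simplified sketch of this derivation, assuming a strict table with enough known boxes: column and row borders are estimated from the known boxes, and each missing box is rebuilt from those borders (the actual border-line computation may differ).

```python
import statistics

def fill_missing_boxes(grid):
    """grid[r][c] is a (x0, y0, x1, y1) box or None for a missing bbox."""
    n_rows, n_cols = len(grid), len(grid[0])
    left   = [statistics.median(grid[r][c][0] for r in range(n_rows) if grid[r][c])
              for c in range(n_cols)]
    right  = [statistics.median(grid[r][c][2] for r in range(n_rows) if grid[r][c])
              for c in range(n_cols)]
    top    = [statistics.median(grid[r][c][1] for c in range(n_cols) if grid[r][c])
              for r in range(n_rows)]
    bottom = [statistics.median(grid[r][c][3] for c in range(n_cols) if grid[r][c])
              for r in range(n_rows)]
    for r in range(n_rows):
        for c in range(n_cols):
            if grid[r][c] is None:
                grid[r][c] = (left[c], top[r], right[c], bottom[r])
    return grid
```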
<text><loc_41><loc_398><loc_234><loc_411>Figure 7 illustrates the distribution of the tables across different dimensions per dataset.</text>
<section_header_level_1><loc_41><loc_418><loc_125><loc_424>1.2. Synthetic datasets</section_header_level_1>
<text><loc_41><loc_430><loc_234><loc_451>Aiming to train and evaluate our models in a broader spectrum of table data we have synthesized four types of datasets. Each one contains tables with different appear-</text>
<text><loc_252><loc_103><loc_445><loc_131>ances in regard to their size, structure, style and content. Every synthetic dataset contains 150k examples, summing up to 600k synthetic examples. All datasets are divided into Train, Test and Val splits (80%, 10%, 10%).</text>
<text><loc_252><loc_133><loc_445><loc_147>The process of generating a synthetic dataset can be decomposed into the following steps:</text>
<unordered_list><list_item><loc_252><loc_149><loc_445><loc_200>1. Prepare styling and content templates: The styling templates have been manually designed and organized into groups of scope specific appearances (e.g. financial data, marketing data, etc.) Additionally, we have prepared curated collections of content templates by extracting the most frequently used terms out of non-synthetic datasets (e.g. PubTabNet, FinTabNet, etc.).</list_item>
<list_item><loc_252><loc_202><loc_445><loc_283>2. Generate table structures: The structure of each synthetic dataset assumes a horizontal table header which potentially spans over multiple rows and a table body that may contain a combination of row spans and column spans. However, spans are not allowed to cross the header - body boundary. The table structure is described by the parameters: Total number of table rows and columns, number of header rows, type of spans (header only spans, row only spans, column only spans, both row and column spans), maximum span size and the ratio of the table area covered by spans.</list_item>
<list_item><loc_252><loc_286><loc_445><loc_314>3. Generate content: Based on the dataset theme , a set of suitable content templates is chosen first. Then, this content can be combined with purely random text to produce the synthetic content.</list_item>
<list_item><loc_252><loc_316><loc_445><loc_345>4. Apply styling templates: Depending on the domain of the synthetic dataset, a set of styling templates is first manually selected. Then, a style is randomly selected to format the appearance of the synthesized table.</list_item>
<list_item><loc_252><loc_347><loc_445><loc_383>5. Render the complete tables: The synthetic table is finally rendered by a web browser engine to generate the bounding boxes for each table cell. A batching technique is utilized to optimize the runtime overhead of the rendering process.</list_item>
|
||||
</unordered_list>
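The structure parameters listed in step 2 map naturally onto a small configuration object. The sketch below uses hypothetical names (not from the paper) purely to make the degrees of freedom explicit.

```python
from dataclasses import dataclass
from enum import Enum

class SpanType(Enum):
    HEADER_ONLY = "header_only"
    ROW_ONLY = "row_only"
    COLUMN_ONLY = "column_only"
    ROW_AND_COLUMN = "row_and_column"

@dataclass
class TableStructureConfig:
    """Hypothetical container for the structure parameters of step 2."""
    n_rows: int             # total number of table rows
    n_cols: int             # total number of table columns
    n_header_rows: int      # header rows; spans may not cross into the body
    span_type: SpanType     # which kind of spans are allowed
    max_span_size: int      # maximum extent of a single span
    span_area_ratio: float  # fraction of the table area covered by spans
```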
|
||||
<section_header_level_1><location><page_11><loc_50><loc_18><loc_89><loc_21></location>2. Prediction post-processing for PDF documents</section_header_level_1>
|
||||
<text><location><page_11><loc_50><loc_10><loc_89><loc_17></location>Although TableFormer can predict the table structure and the bounding boxes for tables recognized inside PDF documents, this is not enough when a full reconstruction of the original table is required. This happens mainly due to the following reasons:</text>
|
||||
<figure>
|
||||
<location><page_12><loc_9><loc_81><loc_89><loc_91></location>
|
||||
<caption>Figure 7: Distribution of the tables across different dimensions per dataset. Simple vs complex tables per dataset and split, strict vs non strict html structures per dataset and table complexity, missing bboxes per dataset and table complexity.</caption>
|
||||
</figure>
|
||||
<unordered_list>
|
||||
<list_item><location><page_12><loc_10><loc_71><loc_47><loc_73></location>· TableFormer output does not include the table cell content.</list_item>
|
||||
<list_item><location><page_12><loc_10><loc_67><loc_47><loc_69></location>· There are occasional inaccuracies in the predictions of the bounding boxes.</list_item>
|
||||
<section_header_level_1><loc_252><loc_393><loc_445><loc_408>2. Prediction post-processing for PDF documents</section_header_level_1>
|
||||
<text><loc_252><loc_415><loc_445><loc_451>Although TableFormer can predict the table structure and the bounding boxes for tables recognized inside PDF documents, this is not enough when a full reconstruction of the original table is required. This happens mainly due to the following reasons:</text>
|
||||
<page_footer><loc_239><loc_463><loc_247><loc_469>11</page_footer>
|
||||
<page_break>
|
||||
<picture><loc_44><loc_47><loc_445><loc_93><caption><loc_41><loc_104><loc_445><loc_118>Figure 7: Distribution of the tables across different dimensions per dataset. Simple vs complex tables per dataset and split, strict vs non strict html structures per dataset and table complexity, missing bboxes per dataset and table complexity.</caption></picture>
|
||||
<unordered_list><list_item><loc_50><loc_133><loc_234><loc_146>· TableFormer output does not include the table cell content.</list_item>
|
||||
<list_item><loc_50><loc_154><loc_234><loc_167>· There are occasional inaccuracies in the predictions of the bounding boxes.</list_item>
|
||||
</unordered_list>
|
||||
<text><location><page_12><loc_8><loc_50><loc_47><loc_65></location>However, it is possible to mitigate those limitations by combining the TableFormer predictions with the information already present inside a programmatic PDF document. More specifically, PDF documents can be seen as a sequence of PDF cells where each cell is described by its content and bounding box. If we are able to associate the PDF cells with the predicted table cells, we can directly link the PDF cell content to the table cell structure and use the PDF bounding boxes to correct misalignments in the predicted table cell bounding boxes.</text>
|
||||
<text><location><page_12><loc_8><loc_47><loc_47><loc_50></location>Here is a step-by-step description of the prediction postprocessing:</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_12><loc_8><loc_42><loc_47><loc_47></location>1. Get the minimal grid dimensions - number of rows and columns for the predicted table structure. This represents the most granular grid for the underlying table structure.</list_item>
|
||||
<list_item><location><page_12><loc_8><loc_36><loc_47><loc_42></location>2. Generate pair-wise matches between the bounding boxes of the PDF cells and the predicted cells. The Intersection Over Union (IOU) metric is used to evaluate the quality of the matches.</list_item>
|
||||
<list_item><location><page_12><loc_8><loc_33><loc_47><loc_36></location>3. Use a carefully selected IOU threshold to designate the matches as "good" ones and "bad" ones.</list_item>
|
||||
<list_item><location><page_12><loc_8><loc_29><loc_47><loc_33></location>3.a. If all IOU scores in a column are below the threshold, discard all predictions (structure and bounding boxes) for that column.</list_item>
|
||||
<list_item><location><page_12><loc_8><loc_24><loc_47><loc_28></location>4. Find the best-fitting content alignment for the predicted cells with good IOU per column. The alignment of the column can be identified by the following formula:</list_item>
|
||||
<text><loc_41><loc_176><loc_234><loc_250>However, it is possible to mitigate those limitations by combining the TableFormer predictions with the information already present inside a programmatic PDF document. More specifically, PDF documents can be seen as a sequence of PDF cells where each cell is described by its content and bounding box. If we are able to associate the PDF cells with the predicted table cells, we can directly link the PDF cell content to the table cell structure and use the PDF bounding boxes to correct misalignments in the predicted table cell bounding boxes.</text>
|
||||
<text><loc_41><loc_252><loc_234><loc_265>Here is a step-by-step description of the prediction postprocessing:</text>
|
||||
<unordered_list><list_item><loc_41><loc_267><loc_234><loc_288>1. Get the minimal grid dimensions - number of rows and columns for the predicted table structure. This represents the most granular grid for the underlying table structure.</list_item>
|
||||
<list_item><loc_41><loc_290><loc_234><loc_318>2. Generate pair-wise matches between the bounding boxes of the PDF cells and the predicted cells. The Intersection Over Union (IOU) metric is used to evaluate the quality of the matches.</list_item>
|
||||
<list_item><loc_41><loc_320><loc_234><loc_334>3. Use a carefully selected IOU threshold to designate the matches as "good" ones and "bad" ones.</list_item>
|
||||
<list_item><loc_41><loc_336><loc_234><loc_356>3.a. If all IOU scores in a column are below the threshold, discard all predictions (structure and bounding boxes) for that column.</list_item>
|
||||
<list_item><loc_41><loc_359><loc_234><loc_379>4. Find the best-fitting content alignment for the predicted cells with good IOU per column. The alignment of the column can be identified by the following formula:</list_item>
|
||||
</unordered_list>
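A minimal sketch of the matching and thresholding described in steps 2, 3 and 3.a; the cell dictionaries, the `col` field and the threshold value are illustrative assumptions, not the actual post-processing code.

```python
def iou(a, b):
    """Intersection over union of two (x0, y0, x1, y1) boxes."""
    ix0, iy0 = max(a[0], b[0]), max(a[1], b[1])
    ix1, iy1 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix1 - ix0) * max(0.0, iy1 - iy0)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

def match_cells(pdf_cells, pred_cells, threshold=0.5):
    """Pair-wise matches between PDF cells and predicted cells (step 2),
    split into "good" and "bad" ones by an IOU threshold (step 3)."""
    good, bad = [], []
    for i, pdf in enumerate(pdf_cells):
        for j, pred in enumerate(pred_cells):
            score = iou(pdf["bbox"], pred["bbox"])
            if score > 0.0:
                (good if score >= threshold else bad).append((i, j, score))
    return good, bad

def columns_to_discard(good_matches, pred_cells, n_cols):
    """Step 3.a: a predicted column with no good match at all is dropped."""
    cols_with_good = {pred_cells[j]["col"] for _, j, _ in good_matches}
    return [c for c in range(n_cols) if c not in cols_with_good]
```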
|
||||
<formula><location><page_12><loc_18><loc_17><loc_47><loc_21></location></formula>
|
||||
<text><location><page_12><loc_8><loc_13><loc_47><loc_16></location>where c is one of { left, centroid, right } and x$_{c}$ is the x-coordinate for the corresponding point.</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_12><loc_8><loc_10><loc_47><loc_13></location>5. Use the alignment computed in step 4 to compute the median x-coordinate for all table columns and the me-</list_item>
|
||||
<formula><loc_90><loc_394><loc_234><loc_413></formula>
|
||||
<text><loc_41><loc_421><loc_234><loc_435>where c is one of { left, centroid, right } and x$_{c}$ is the x-coordinate for the corresponding point.</text>
|
||||
<unordered_list><list_item><loc_41><loc_437><loc_234><loc_450>5. Use the alignment computed in step 4, to compute the median x -coordinate for all table columns and the me-</list_item>
|
||||
</unordered_list>
|
||||
<text><location><page_12><loc_50><loc_68><loc_89><loc_73></location>dian cell size for all table cells. The usage of the median during the computations helps to eliminate outliers caused by occasional column spans, which are usually wider than normal.</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_12><loc_50><loc_65><loc_89><loc_67></location>6. Snap all cells with bad IOU to their corresponding median x-coordinates and cell sizes.</list_item>
|
||||
<list_item><location><page_12><loc_50><loc_51><loc_89><loc_64></location>7. Generate a new set of pair-wise matches between the corrected bounding boxes and PDF cells. This time use a modified version of the IOU metric, where the area of the intersection between the predicted and PDF cells is divided by the PDF cell area. In case there are multiple matches for the same PDF cell, the prediction with the higher score is preferred. This covers the cases where the PDF cells are smaller than the area of predicted or corrected prediction cells.</list_item>
|
||||
<list_item><location><page_12><loc_50><loc_42><loc_89><loc_51></location>8. On some rare occasions, we have noticed that TableFormer can confuse a single column as two. When the postprocessing steps are applied, this results in two predicted columns pointing to the same PDF column. In such a case we must de-duplicate the columns according to the highest total column intersection score.</list_item>
|
||||
<list_item><location><page_12><loc_50><loc_28><loc_89><loc_41></location>9. Pick up the remaining orphan cells. There could be cases, when after applying all the previous post-processing steps, some PDF cells could still remain without any match to predicted cells. However, it is still possible to deduce the correct matching for an orphan PDF cell by mapping its bounding box on the geometry of the grid. This mapping decides if the content of the orphan cell will be appended to an already matched table cell, or a new table cell should be created to match with the orphan.</list_item>
|
||||
<text><loc_252><loc_133><loc_445><loc_161>dian cell size for all table cells. The usage of the median during the computations helps to eliminate outliers caused by occasional column spans, which are usually wider than normal.</text>
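The alignment formula itself is not reproduced in this export. One plausible reading, assumed in the sketch below, is to pick the reference point (left, centroid or right) whose x-coordinates deviate least from their median within a column, and to use medians for the per-column x-coordinate as described in step 5.

```python
from statistics import median

def ref_x(bbox, c):
    """x-coordinate of a reference point c in {"left", "centroid", "right"}."""
    x0, _, x1, _ = bbox
    return {"left": x0, "centroid": (x0 + x1) / 2.0, "right": x1}[c]

def column_alignment(column_bboxes):
    """Step 4 (assumed reading): choose the alignment whose x-coordinates
    deviate least from their median within the column."""
    def spread(c):
        xs = [ref_x(b, c) for b in column_bboxes]
        m = median(xs)
        return sum(abs(x - m) for x in xs)
    return min(("left", "centroid", "right"), key=spread)

def column_median_x(column_bboxes, alignment):
    """Step 5: the median x-coordinate of the column under the chosen
    alignment; medians suppress outliers caused by wide column spans."""
    return median(ref_x(b, alignment) for b in column_bboxes)
```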
|
||||
<unordered_list><list_item><loc_252><loc_164><loc_445><loc_177>6. Snap all cells with bad IOU to their corresponding median x-coordinates and cell sizes.</list_item>
|
||||
<list_item><loc_252><loc_179><loc_445><loc_245>7. Generate a new set of pair-wise matches between the corrected bounding boxes and PDF cells. This time use a modified version of the IOU metric, where the area of the intersection between the predicted and PDF cells is divided by the PDF cell area. In case there are multiple matches for the same PDF cell, the prediction with the higher score is preferred. This covers the cases where the PDF cells are smaller than the area of predicted or corrected prediction cells.</list_item>
|
||||
<list_item><loc_252><loc_247><loc_445><loc_290>8. On some rare occasions, we have noticed that TableFormer can confuse a single column as two. When the postprocessing steps are applied, this results in two predicted columns pointing to the same PDF column. In such a case we must de-duplicate the columns according to the highest total column intersection score.</list_item>
|
||||
<list_item><loc_252><loc_293><loc_445><loc_359>9. Pick up the remaining orphan cells. There could be cases, when after applying all the previous post-processing steps, some PDF cells could still remain without any match to predicted cells. However, it is still possible to deduce the correct matching for an orphan PDF cell by mapping its bounding box on the geometry of the grid. This mapping decides if the content of the orphan cell will be appended to an already matched table cell, or a new table cell should be created to match with the orphan.</list_item>
|
||||
</unordered_list>
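A sketch of the modified overlap metric of step 7 (intersection area divided by the PDF cell area) together with the preference for the highest-scoring prediction when a PDF cell matches several corrected cells; assumed implementation, not the actual post-processing code.

```python
def overlap_over_pdf_area(pdf_bbox, pred_bbox):
    """Modified metric of step 7: intersection area divided by the PDF cell
    area (not the union), so a small PDF cell fully inside a larger corrected
    cell still scores close to 1.0."""
    ix0, iy0 = max(pdf_bbox[0], pred_bbox[0]), max(pdf_bbox[1], pred_bbox[1])
    ix1, iy1 = min(pdf_bbox[2], pred_bbox[2]), min(pdf_bbox[3], pred_bbox[3])
    inter = max(0.0, ix1 - ix0) * max(0.0, iy1 - iy0)
    pdf_area = (pdf_bbox[2] - pdf_bbox[0]) * (pdf_bbox[3] - pdf_bbox[1])
    return inter / pdf_area if pdf_area > 0 else 0.0

def rematch(pdf_cells, corrected_cells):
    """Keep, per PDF cell, only the highest-scoring corrected cell (step 7)."""
    best = {}
    for i, pdf in enumerate(pdf_cells):
        for j, pred in enumerate(corrected_cells):
            score = overlap_over_pdf_area(pdf["bbox"], pred["bbox"])
            if score > 0.0 and (i not in best or score > best[i][1]):
                best[i] = (j, score)
    return best  # pdf index -> (corrected cell index, score)
```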
|
||||
<text><location><page_12><loc_50><loc_24><loc_89><loc_28></location>9a. Compute the top and bottom boundary of the horizontal band for each grid row (min/max y coordinates per row).</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_12><loc_50><loc_21><loc_89><loc_23></location>9b. Intersect the orphan's bounding box with the row bands, and map the cell to the closest grid row.</list_item>
|
||||
<list_item><location><page_12><loc_50><loc_16><loc_89><loc_20></location>9c. Compute the left and right boundary of the vertical band for each grid column (min/max x coordinates per column).</list_item>
|
||||
<list_item><location><page_12><loc_50><loc_13><loc_89><loc_16></location>9d. Intersect the orphan's bounding box with the column bands, and map the cell to the closest grid column.</list_item>
|
||||
<list_item><location><page_12><loc_50><loc_10><loc_89><loc_13></location>9e. If the table cell under the identified row and column is not empty, extend its content with the content of the or-</list_item>
|
||||
<text><loc_252><loc_361><loc_445><loc_381>9a. Compute the top and bottom boundary of the horizontal band for each grid row (min/max y coordinates per row).</text>
|
||||
<unordered_list><list_item><loc_252><loc_384><loc_445><loc_397>9b. Intersect the orphan's bounding box with the row bands, and map the cell to the closest grid row.</list_item>
|
||||
<list_item><loc_252><loc_399><loc_445><loc_420>9c. Compute the left and right boundary of the vertical band for each grid column (min/max x coordinates per column).</list_item>
|
||||
<list_item><loc_252><loc_422><loc_445><loc_435>9d. Intersect the orphan's bounding box with the column bands, and map the cell to the closest grid column.</list_item>
|
||||
<list_item><loc_252><loc_437><loc_445><loc_450>9e. If the table cell under the identified row and column is not empty, extend its content with the content of the or-</list_item>
|
||||
</unordered_list>
|
||||
<text><location><page_13><loc_8><loc_89><loc_15><loc_91></location>phan cell.</text>
|
||||
<text><location><page_13><loc_8><loc_86><loc_47><loc_89></location>9f. Otherwise, create a new structural cell and match it with the orphan cell.</text>
|
||||
<text><location><page_13><loc_8><loc_83><loc_47><loc_86></location>Additional images with examples of TableFormer predictions and post-processing can be found below.</text>
|
||||
<table>
|
||||
<location><page_13><loc_14><loc_73><loc_39><loc_80></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_14><loc_63><loc_39><loc_70></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_14><loc_54><loc_39><loc_61></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_14><loc_38><loc_41><loc_50></location>
|
||||
<caption>Figure 8: Example of a table with multi-line header.</caption>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_51><loc_83><loc_91><loc_87></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_51><loc_77><loc_91><loc_80></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_51><loc_71><loc_91><loc_75></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_13><loc_51><loc_63><loc_70><loc_68></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_13><loc_51><loc_63><loc_70><loc_68></location>
|
||||
<caption>Figure 9: Example of a table with big empty distance between cells.</caption>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_55><loc_45><loc_80><loc_51></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_55><loc_37><loc_80><loc_43></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_13><loc_55><loc_28><loc_80><loc_34></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_13><loc_55><loc_16><loc_85><loc_25></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_13><loc_55><loc_16><loc_85><loc_25></location>
|
||||
<caption>Figure 10: Example of a complex table with empty cells.</caption>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_8><loc_57><loc_46><loc_65></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_14><loc_8><loc_56><loc_46><loc_87></location>
|
||||
<caption>Figure 11: Simple table with different style and empty cells.</caption>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_14><loc_8><loc_38><loc_51><loc_43></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_8><loc_32><loc_51><loc_36></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_8><loc_25><loc_51><loc_30></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_14><loc_8><loc_17><loc_29><loc_23></location>
|
||||
<caption>Figure 12: Simple table predictions and post processing.</caption>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_14><loc_52><loc_73><loc_87><loc_80></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_52><loc_65><loc_87><loc_71></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_54><loc_55><loc_86><loc_64></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_14><loc_52><loc_55><loc_87><loc_89></location>
|
||||
<caption>Figure 13: Table predictions example on colorful table.</caption>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_14><loc_52><loc_40><loc_85><loc_46></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_52><loc_32><loc_85><loc_38></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_52><loc_25><loc_85><loc_31></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_14><loc_52><loc_16><loc_87><loc_23></location>
|
||||
<caption>Figure 14: Example with multi-line text.</caption>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_15><loc_9><loc_69><loc_46><loc_83></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_15><loc_9><loc_69><loc_46><loc_83></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_15><loc_9><loc_53><loc_46><loc_67></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_15><loc_9><loc_53><loc_46><loc_67></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_15><loc_9><loc_37><loc_46><loc_51></location>
|
||||
</figure>
|
||||
<figure>
|
||||
<location><page_15><loc_8><loc_20><loc_52><loc_36></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_15><loc_8><loc_20><loc_52><loc_36></location>
|
||||
<caption>Figure 15: Example with triangular table.</caption>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_15><loc_53><loc_72><loc_86><loc_85></location>
|
||||
</table>
|
||||
<table>
|
||||
<location><page_15><loc_53><loc_57><loc_86><loc_69></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_15><loc_53><loc_41><loc_86><loc_54></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_15><loc_53><loc_41><loc_86><loc_54></location>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_15><loc_58><loc_20><loc_81><loc_38></location>
|
||||
</figure>
|
||||
<table>
|
||||
<location><page_15><loc_58><loc_20><loc_81><loc_38></location>
|
||||
<caption>Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.</caption>
|
||||
</table>
|
||||
<figure>
|
||||
<location><page_16><loc_11><loc_37><loc_86><loc_68></location>
|
||||
<caption>Figure 17: Example of long table. End-to-end example from initial PDF cells to prediction of bounding boxes, post processing and prediction of structure.</caption>
|
||||
</figure>
|
||||
</document>
|
||||
<page_footer><loc_239><loc_463><loc_247><loc_469>12</page_footer>
|
||||
<page_break>
|
||||
<text><loc_41><loc_47><loc_73><loc_53>phan cell.</text>
|
||||
<text><loc_41><loc_55><loc_234><loc_68>9f. Otherwise, create a new structural cell and match it with the orphan cell.</text>
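Steps 9a-9d amount to intersecting the orphan's bounding box with per-row horizontal bands and per-column vertical bands and picking the closest ones. A sketch under that assumption (maximum overlap is used as a proxy for "closest"):

```python
def band_overlap(lo, hi, band_lo, band_hi):
    """Length of the overlap between [lo, hi] and [band_lo, band_hi]."""
    return max(0.0, min(hi, band_hi) - max(lo, band_lo))

def map_orphan(orphan_bbox, row_bands, col_bands):
    """Steps 9a-9d (assumed implementation): row_bands and col_bands are
    lists of (min, max) y- and x-intervals per grid row/column. The orphan
    is mapped to the row band and column band it overlaps the most."""
    x0, y0, x1, y1 = orphan_bbox
    row = max(range(len(row_bands)),
              key=lambda r: band_overlap(y0, y1, *row_bands[r]))
    col = max(range(len(col_bands)),
              key=lambda c: band_overlap(x0, x1, *col_bands[c]))
    return row, col
```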
|
||||
<text><loc_41><loc_70><loc_234><loc_83>Additional images with examples of TableFormer predictions and post-processing can be found below.</text>
|
||||
<otsl><loc_69><loc_99><loc_195><loc_135></otsl>
|
||||
<otsl><loc_68><loc_148><loc_195><loc_184></otsl>
|
||||
<otsl><loc_69><loc_195><loc_195><loc_232></otsl>
|
||||
<otsl><loc_68><loc_250><loc_203><loc_308><caption><loc_52><loc_317><loc_223><loc_323>Figure 8: Example of a table with multi-line header.</caption></otsl>
|
||||
<page_footer><loc_239><loc_463><loc_247><loc_469>13</page_footer>
|
||||
<otsl><loc_254><loc_64><loc_454><loc_86></otsl>
|
||||
<otsl><loc_253><loc_98><loc_454><loc_117></otsl>
|
||||
<otsl><loc_253><loc_124><loc_454><loc_147></otsl>
|
||||
<picture><loc_253><loc_160><loc_348><loc_185></picture>
|
||||
<otsl><loc_253><loc_160><loc_348><loc_185><caption><loc_252><loc_194><loc_445><loc_207>Figure 9: Example of a table with big empty distance between cells.</caption></otsl>
|
||||
<otsl><loc_274><loc_245><loc_400><loc_276></otsl>
|
||||
<otsl><loc_274><loc_287><loc_400><loc_317></otsl>
|
||||
<otsl><loc_274><loc_328><loc_401><loc_358></otsl>
|
||||
<picture><loc_273><loc_374><loc_424><loc_420></picture>
|
||||
<otsl><loc_273><loc_374><loc_424><loc_420><caption><loc_255><loc_430><loc_443><loc_435>Figure 10: Example of a complex table with empty cells.</caption></otsl>
|
||||
<page_break>
|
||||
<otsl><loc_42><loc_173><loc_231><loc_217></otsl>
|
||||
<picture><loc_42><loc_66><loc_231><loc_218><caption><loc_41><loc_225><loc_234><loc_238>Figure 11: Simple table with different style and empty cells.</caption></picture>
|
||||
<otsl><loc_42><loc_286><loc_254><loc_310></otsl>
|
||||
<otsl><loc_42><loc_318><loc_254><loc_342></otsl>
|
||||
<otsl><loc_42><loc_350><loc_254><loc_374></otsl>
|
||||
<picture><loc_41><loc_386><loc_145><loc_414><caption><loc_45><loc_424><loc_230><loc_430>Figure 12: Simple table predictions and post processing.</caption></picture>
|
||||
<page_footer><loc_239><loc_463><loc_247><loc_469>14</page_footer>
|
||||
<otsl><loc_261><loc_102><loc_437><loc_135></otsl>
|
||||
<otsl><loc_261><loc_143><loc_437><loc_177></otsl>
|
||||
<otsl><loc_268><loc_182><loc_428><loc_226></otsl>
|
||||
<picture><loc_260><loc_57><loc_437><loc_227><caption><loc_258><loc_235><loc_440><loc_240>Figure 13: Table predictions example on colorful table.</caption></picture>
|
||||
<otsl><loc_261><loc_272><loc_424><loc_302></otsl>
|
||||
<otsl><loc_261><loc_309><loc_424><loc_338></otsl>
|
||||
<otsl><loc_261><loc_345><loc_425><loc_374></otsl>
|
||||
<otsl><loc_261><loc_385><loc_436><loc_422><caption><loc_282><loc_432><loc_416><loc_437>Figure 14: Example with multi-line text.</caption></otsl>
|
||||
<page_break>
|
||||
<picture><loc_45><loc_86><loc_228><loc_157></picture>
|
||||
<otsl><loc_45><loc_86><loc_228><loc_157></otsl>
|
||||
<picture><loc_44><loc_164><loc_228><loc_236></picture>
|
||||
<otsl><loc_44><loc_164><loc_228><loc_236></otsl>
|
||||
<picture><loc_45><loc_243><loc_229><loc_314></picture>
|
||||
<picture><loc_41><loc_319><loc_261><loc_399></picture>
|
||||
<otsl><loc_41><loc_319><loc_261><loc_399><caption><loc_69><loc_407><loc_206><loc_412>Figure 15: Example with triangular table.</caption></otsl>
|
||||
<page_footer><loc_239><loc_463><loc_247><loc_469>15</page_footer>
|
||||
<otsl><loc_264><loc_77><loc_430><loc_141></otsl>
|
||||
<otsl><loc_264><loc_153><loc_430><loc_217></otsl>
|
||||
<picture><loc_264><loc_229><loc_430><loc_293></picture>
|
||||
<otsl><loc_264><loc_229><loc_430><loc_293></otsl>
|
||||
<picture><loc_289><loc_308><loc_405><loc_401></picture>
|
||||
<otsl><loc_289><loc_308><loc_405><loc_401><caption><loc_252><loc_412><loc_445><loc_425>Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.</caption></otsl>
|
||||
<page_break>
|
||||
<picture><loc_55><loc_160><loc_432><loc_314><caption><loc_41><loc_321><loc_445><loc_334>Figure 17: Example of long table. End-to-end example from initial PDF cells to prediction of bounding boxes, post processing and prediction of structure.</caption></picture>
|
||||
<page_footer><loc_239><loc_463><loc_247><loc_469>16</page_footer>
|
||||
</doctag>
|
@ -1,240 +1,157 @@
|
||||
<document>
|
||||
<section_header_level_1><location><page_1><loc_18><loc_85><loc_83><loc_89></location>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</section_header_level_1>
|
||||
<text><location><page_1><loc_15><loc_77><loc_32><loc_83></location>Birgit Pfitzmann IBM Research Rueschlikon, Switzerland bpf@zurich.ibm.com</text>
|
||||
<text><location><page_1><loc_42><loc_77><loc_58><loc_83></location>Christoph Auer IBM Research Rueschlikon, Switzerland cau@zurich.ibm.com</text>
|
||||
<text><location><page_1><loc_69><loc_77><loc_85><loc_83></location>Michele Dolfi IBM Research Rueschlikon, Switzerland dol@zurich.ibm.com</text>
|
||||
<text><location><page_1><loc_28><loc_70><loc_45><loc_76></location>Ahmed S. Nassar IBM Research Rueschlikon, Switzerland ahn@zurich.ibm.com</text>
|
||||
<text><location><page_1><loc_55><loc_70><loc_72><loc_76></location>Peter Staar IBM Research Rueschlikon, Switzerland taa@zurich.ibm.com</text>
|
||||
<section_header_level_1><location><page_1><loc_9><loc_67><loc_18><loc_69></location>ABSTRACT</section_header_level_1>
|
||||
<text><location><page_1><loc_9><loc_33><loc_48><loc_67></location>Accurate document layout analysis is a key requirement for highquality PDF document conversion. With the recent availability of public, large ground-truth datasets such as PubLayNet and DocBank, deep-learning models have proven to be very effective at layout detection and segmentation. While these datasets are of adequate size to train such models, they severely lack in layout variability since they are sourced from scientific article repositories such as PubMed and arXiv only. Consequently, the accuracy of the layout segmentation drops significantly when these models are applied on more challenging and diverse layouts. In this paper, we present DocLayNet , a new, publicly available, document-layout annotation dataset in COCO format. It contains 80863 manually annotated pages from diverse data sources to represent a wide variability in layouts. For each PDF page, the layout annotations provide labelled bounding-boxes with a choice of 11 distinct classes. DocLayNet also provides a subset of double- and triple-annotated pages to determine the inter-annotator agreement. In multiple experiments, we provide baseline accuracy scores (in mAP) for a set of popular object detection models. We also demonstrate that these models fall approximately 10% behind the inter-annotator agreement. Furthermore, we provide evidence that DocLayNet is of sufficient size. Lastly, we compare models trained on PubLayNet, DocBank and DocLayNet, showing that layout predictions of the DocLayNettrained models are more robust and thus the preferred choice for general-purpose document-layout analysis.</text>
|
||||
<section_header_level_1><location><page_1><loc_9><loc_29><loc_22><loc_30></location>CCS CONCEPTS</section_header_level_1>
|
||||
<text><location><page_1><loc_9><loc_25><loc_49><loc_29></location>· Information systems → Document structure ; · Applied computing → Document analysis ; · Computing methodologies → Machine learning ; Computer vision ; Object detection ;</text>
|
||||
<text><location><page_1><loc_9><loc_15><loc_48><loc_20></location>Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).</text>
|
||||
<text><location><page_1><loc_9><loc_14><loc_32><loc_15></location>KDD '22, August 14-18, 2022, Washington, DC, USA</text>
|
||||
<text><location><page_1><loc_9><loc_13><loc_31><loc_14></location>© 2022 Copyright held by the owner/author(s).</text>
|
||||
<text><location><page_1><loc_9><loc_12><loc_26><loc_13></location>ACM ISBN 978-1-4503-9385-0/22/08.</text>
|
||||
<text><location><page_1><loc_9><loc_11><loc_27><loc_12></location>https://doi.org/10.1145/3534678.3539043</text>
|
||||
<figure>
|
||||
<location><page_1><loc_53><loc_34><loc_90><loc_68></location>
|
||||
<caption>Figure 1: Four examples of complex page layouts across different document categories</caption>
|
||||
</figure>
|
||||
<section_header_level_1><location><page_1><loc_52><loc_24><loc_62><loc_25></location>KEYWORDS</section_header_level_1>
|
||||
<text><location><page_1><loc_52><loc_21><loc_91><loc_23></location>PDF document conversion, layout segmentation, object-detection, data set, Machine Learning</text>
|
||||
<section_header_level_1><location><page_1><loc_52><loc_18><loc_66><loc_19></location>ACM Reference Format:</section_header_level_1>
|
||||
<text><location><page_1><loc_52><loc_11><loc_91><loc_18></location>Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar. 2022. DocLayNet: A Large Human-Annotated Dataset for DocumentLayout Analysis. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22), August 14-18, 2022, Washington, DC, USA. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/ 3534678.3539043</text>
|
||||
<section_header_level_1><location><page_2><loc_9><loc_88><loc_26><loc_89></location>1 INTRODUCTION</section_header_level_1>
|
||||
<text><location><page_2><loc_9><loc_71><loc_50><loc_86></location>Despite the substantial improvements achieved with machine-learning (ML) approaches and deep neural networks in recent years, document conversion remains a challenging problem, as demonstrated by the numerous public competitions held on this topic [1-4]. The challenge originates from the huge variability in PDF documents regarding layout, language and formats (scanned, programmatic or a combination of both). Engineering a single ML model that can be applied on all types of documents and provides high-quality layout segmentation remains to this day extremely challenging [5]. To highlight the variability in document layouts, we show a few example documents from the DocLayNet dataset in Figure 1.</text>
|
||||
<text><location><page_2><loc_9><loc_37><loc_48><loc_71></location>A key problem in the process of document conversion is to understand the structure of a single document page, i.e. which segments of text should be grouped together in a unit. To train models for this task, there are currently two large datasets available to the community, PubLayNet [6] and DocBank [7]. They were introduced in 2019 and 2020 respectively and significantly accelerated the implementation of layout detection and segmentation models due to their sizes of 300K and 500K ground-truth pages. These sizes were achieved by leveraging an automation approach. The benefit of automated ground-truth generation is obvious: one can generate large ground-truth datasets at virtually no cost. However, the automation introduces a constraint on the variability in the dataset, because corresponding structured source data must be available. PubLayNet and DocBank were both generated from scientific document repositories (PubMed and arXiv), which provide XML or L A T E X sources. Those scientific documents present a limited variability in their layouts, because they are typeset in uniform templates provided by the publishers. Obviously, documents such as technical manuals, annual company reports, legal text, government tenders, etc. have very different and partially unique layouts. As a consequence, the layout predictions obtained from models trained on PubLayNet or DocBank is very reasonable when applied on scientific documents. However, for more artistic or free-style layouts, we see sub-par prediction quality from these models, which we demonstrate in Section 5.</text>
|
||||
<text><location><page_2><loc_9><loc_27><loc_48><loc_36></location>In this paper, we present the DocLayNet dataset. It provides pageby-page layout annotation ground-truth using bounding-boxes for 11 distinct class labels on 80863 unique document pages, of which a fraction carry double- or triple-annotations. DocLayNet is similar in spirit to PubLayNet and DocBank and will likewise be made available to the public 1 in order to stimulate the document-layout analysis community. It distinguishes itself in the following aspects:</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_2><loc_11><loc_22><loc_48><loc_26></location>(1) Human Annotation : In contrast to PubLayNet and DocBank, we relied on human annotation instead of automation approaches to generate the data set.</list_item>
|
||||
<list_item><location><page_2><loc_11><loc_20><loc_48><loc_22></location>(2) Large Layout Variability : We include diverse and complex layouts from a large variety of public sources.</list_item>
|
||||
<list_item><location><page_2><loc_11><loc_15><loc_48><loc_19></location>(3) Detailed Label Set : We define 11 class labels to distinguish layout features in high detail. PubLayNet provides 5 labels; DocBank provides 13, although not a superset of ours.</list_item>
|
||||
<list_item><location><page_2><loc_11><loc_13><loc_48><loc_15></location>(4) Redundant Annotations : A fraction of the pages in the DocLayNet data set carry more than one human annotation.</list_item>
|
||||
<doctag><page_header><loc_15><loc_138><loc_30><loc_350>arXiv:2206.01062v1 [cs.CV] 2 Jun 2022</page_header>
|
||||
<section_header_level_1><loc_88><loc_53><loc_413><loc_76>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</section_header_level_1>
|
||||
<text><loc_74><loc_84><loc_158><loc_114>Birgit Pfitzmann IBM Research Rueschlikon, Switzerland bpf@zurich.ibm.com</text>
|
||||
<text><loc_208><loc_84><loc_292><loc_114>Christoph Auer IBM Research Rueschlikon, Switzerland cau@zurich.ibm.com</text>
|
||||
<text><loc_343><loc_84><loc_426><loc_114>Michele Dolfi IBM Research Rueschlikon, Switzerland dol@zurich.ibm.com</text>
|
||||
<text><loc_141><loc_121><loc_225><loc_151>Ahmed S. Nassar IBM Research Rueschlikon, Switzerland ahn@zurich.ibm.com</text>
|
||||
<text><loc_275><loc_121><loc_359><loc_151>Peter Staar IBM Research Rueschlikon, Switzerland taa@zurich.ibm.com</text>
|
||||
<section_header_level_1><loc_44><loc_156><loc_91><loc_163>ABSTRACT</section_header_level_1>
|
||||
<text><loc_44><loc_166><loc_241><loc_337>Accurate document layout analysis is a key requirement for highquality PDF document conversion. With the recent availability of public, large ground-truth datasets such as PubLayNet and DocBank, deep-learning models have proven to be very effective at layout detection and segmentation. While these datasets are of adequate size to train such models, they severely lack in layout variability since they are sourced from scientific article repositories such as PubMed and arXiv only. Consequently, the accuracy of the layout segmentation drops significantly when these models are applied on more challenging and diverse layouts. In this paper, we present DocLayNet , a new, publicly available, document-layout annotation dataset in COCO format. It contains 80863 manually annotated pages from diverse data sources to represent a wide variability in layouts. For each PDF page, the layout annotations provide labelled bounding-boxes with a choice of 11 distinct classes. DocLayNet also provides a subset of double- and triple-annotated pages to determine the inter-annotator agreement. In multiple experiments, we provide baseline accuracy scores (in mAP) for a set of popular object detection models. We also demonstrate that these models fall approximately 10% behind the inter-annotator agreement. Furthermore, we provide evidence that DocLayNet is of sufficient size. Lastly, we compare models trained on PubLayNet, DocBank and DocLayNet, showing that layout predictions of the DocLayNettrained models are more robust and thus the preferred choice for general-purpose document-layout analysis.</text>
|
||||
<section_header_level_1><loc_44><loc_348><loc_110><loc_354>CCS CONCEPTS</section_header_level_1>
|
||||
<text><loc_44><loc_357><loc_243><loc_377>· Information systems → Document structure ; · Applied computing → Document analysis ; · Computing methodologies → Machine learning ; Computer vision ; Object detection ;</text>
|
||||
<text><loc_44><loc_401><loc_241><loc_425>Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).</text>
|
||||
<text><loc_44><loc_426><loc_162><loc_430>KDD '22, August 14-18, 2022, Washington, DC, USA</text>
|
||||
<text><loc_44><loc_432><loc_153><loc_436>© 2022 Copyright held by the owner/author(s).</text>
|
||||
<text><loc_44><loc_437><loc_128><loc_441>ACM ISBN 978-1-4503-9385-0/22/08.</text>
|
||||
<text><loc_44><loc_442><loc_136><loc_446>https://doi.org/10.1145/3534678.3539043</text>
|
||||
<picture><loc_264><loc_158><loc_452><loc_332><caption><loc_260><loc_341><loc_457><loc_353>Figure 1: Four examples of complex page layouts across different document categories</caption></picture>
|
||||
<section_header_level_1><loc_260><loc_374><loc_310><loc_381>KEYWORDS</section_header_level_1>
|
||||
<text><loc_260><loc_384><loc_457><loc_396>PDF document conversion, layout segmentation, object-detection, data set, Machine Learning</text>
|
||||
<section_header_level_1><loc_260><loc_404><loc_331><loc_409>ACM Reference Format:</section_header_level_1>
|
||||
<text><loc_260><loc_410><loc_457><loc_447>Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar. 2022. DocLayNet: A Large Human-Annotated Dataset for DocumentLayout Analysis. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22), August 14-18, 2022, Washington, DC, USA. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/ 3534678.3539043</text>
|
||||
<page_break>
|
||||
<page_header><loc_44><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar</page_header>
|
||||
<section_header_level_1><loc_44><loc_54><loc_128><loc_61>1 INTRODUCTION</section_header_level_1>
|
||||
<text><loc_44><loc_70><loc_248><loc_145>Despite the substantial improvements achieved with machine-learning (ML) approaches and deep neural networks in recent years, document conversion remains a challenging problem, as demonstrated by the numerous public competitions held on this topic [1-4]. The challenge originates from the huge variability in PDF documents regarding layout, language and formats (scanned, programmatic or a combination of both). Engineering a single ML model that can be applied on all types of documents and provides high-quality layout segmentation remains to this day extremely challenging [5]. To highlight the variability in document layouts, we show a few example documents from the DocLayNet dataset in Figure 1.</text>
|
||||
<text><loc_44><loc_146><loc_241><loc_317>A key problem in the process of document conversion is to understand the structure of a single document page, i.e. which segments of text should be grouped together in a unit. To train models for this task, there are currently two large datasets available to the community, PubLayNet [6] and DocBank [7]. They were introduced in 2019 and 2020 respectively and significantly accelerated the implementation of layout detection and segmentation models due to their sizes of 300K and 500K ground-truth pages. These sizes were achieved by leveraging an automation approach. The benefit of automated ground-truth generation is obvious: one can generate large ground-truth datasets at virtually no cost. However, the automation introduces a constraint on the variability in the dataset, because corresponding structured source data must be available. PubLayNet and DocBank were both generated from scientific document repositories (PubMed and arXiv), which provide XML or L A T E X sources. Those scientific documents present a limited variability in their layouts, because they are typeset in uniform templates provided by the publishers. Obviously, documents such as technical manuals, annual company reports, legal text, government tenders, etc. have very different and partially unique layouts. As a consequence, the layout predictions obtained from models trained on PubLayNet or DocBank is very reasonable when applied on scientific documents. However, for more artistic or free-style layouts, we see sub-par prediction quality from these models, which we demonstrate in Section 5.</text>
|
||||
<text><loc_44><loc_319><loc_241><loc_366>In this paper, we present the DocLayNet dataset. It provides pageby-page layout annotation ground-truth using bounding-boxes for 11 distinct class labels on 80863 unique document pages, of which a fraction carry double- or triple-annotations. DocLayNet is similar in spirit to PubLayNet and DocBank and will likewise be made available to the public 1 in order to stimulate the document-layout analysis community. It distinguishes itself in the following aspects:</text>
|
||||
<unordered_list><list_item><loc_53><loc_369><loc_241><loc_388>(1) Human Annotation : In contrast to PubLayNet and DocBank, we relied on human annotation instead of automation approaches to generate the data set.</list_item>
|
||||
<list_item><loc_53><loc_390><loc_240><loc_402>(2) Large Layout Variability : We include diverse and complex layouts from a large variety of public sources.</list_item>
|
||||
<list_item><loc_53><loc_404><loc_241><loc_423>(3) Detailed Label Set : We define 11 class labels to distinguish layout features in high detail. PubLayNet provides 5 labels; DocBank provides 13, although not a superset of ours.</list_item>
|
||||
<list_item><loc_53><loc_424><loc_241><loc_437>(4) Redundant Annotations : A fraction of the pages in the DocLayNet data set carry more than one human annotation.</list_item>
|
||||
</unordered_list>
|
||||
<text><location><page_2><loc_56><loc_87><loc_91><loc_89></location>This enables experimentation with annotation uncertainty and quality control analysis.</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_2><loc_54><loc_80><loc_91><loc_86></location>(5) Pre-defined Train-, Test- & Validation-set : Like DocBank, we provide fixed train-, test- & validation-sets to ensure proportional representation of the class-labels. Further, we prevent leakage of unique layouts across sets, which has a large effect on model accuracy scores.</list_item>
|
||||
<footnote><loc_44><loc_443><loc_176><loc_447>$^{1}$https://developer.ibm.com/exchanges/data/all/doclaynet</footnote>
|
||||
<text><loc_279><loc_55><loc_456><loc_67>This enables experimentation with annotation uncertainty and quality control analysis.</text>
|
||||
<unordered_list><list_item><loc_269><loc_69><loc_457><loc_102>(5) Pre-defined Train-, Test- & Validation-set : Like DocBank, we provide fixed train-, test- & validation-sets to ensure proportional representation of the class-labels. Further, we prevent leakage of unique layouts across sets, which has a large effect on model accuracy scores.</list_item>
|
||||
</unordered_list>
|
||||
<text><location><page_2><loc_52><loc_72><loc_91><loc_79></location>All aspects outlined above are detailed in Section 3. In Section 4, we will elaborate on how we designed and executed this large-scale human annotation campaign. We will also share key insights and lessons learned that might prove helpful for other parties planning to set up annotation campaigns.</text>
|
||||
<text><location><page_2><loc_52><loc_61><loc_91><loc_72></location>In Section 5, we will present baseline accuracy numbers for a variety of object detection methods (Faster R-CNN, Mask R-CNN and YOLOv5) trained on DocLayNet. We further show how the model performance is impacted by varying the DocLayNet dataset size, reducing the label set and modifying the train/test-split. Last but not least, we compare the performance of models trained on PubLayNet, DocBank and DocLayNet and demonstrate that a model trained on DocLayNet provides overall more robust layout recovery.</text>
|
||||
<section_header_level_1><location><page_2><loc_52><loc_58><loc_69><loc_59></location>2 RELATED WORK</section_header_level_1>
|
||||
<text><location><page_2><loc_52><loc_41><loc_91><loc_56></location>While early approaches in document-layout analysis used rulebased algorithms and heuristics [8], the problem is lately addressed with deep learning methods. The most common approach is to leverage object detection models [9-15]. In the last decade, the accuracy and speed of these models has increased dramatically. Furthermore, most state-of-the-art object detection methods can be trained and applied with very little work, thanks to a standardisation effort of the ground-truth data format [16] and common deep-learning frameworks [17]. Reference data sets such as PubLayNet [6] and DocBank provide their data in the commonly accepted COCO format [16].</text>
|
||||
<text><location><page_2><loc_52><loc_30><loc_91><loc_41></location>Lately, new types of ML models for document-layout analysis have emerged in the community [18-21]. These models do not approach the problem of layout analysis purely based on an image representation of the page, as computer vision methods do. Instead, they combine the text tokens and image representation of a page in order to obtain a segmentation. While the reported accuracies appear to be promising, a broadly accepted data format which links geometric and textual features has yet to be established.</text>
|
||||
<section_header_level_1><location><page_2><loc_52><loc_27><loc_78><loc_29></location>3 THE DOCLAYNET DATASET</section_header_level_1>
|
||||
<text><location><page_2><loc_52><loc_15><loc_91><loc_25></location>DocLayNet contains 80863 PDF pages. Among these, 7059 carry two instances of human annotations, and 1591 carry three. This amounts to 91104 total annotation instances. The annotations provide layout information in the shape of labeled, rectangular boundingboxes. We define 11 distinct labels for layout features, namely Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Our reasoning for picking this particular label set is detailed in Section 4.</text>
|
||||
<text><location><page_2><loc_52><loc_11><loc_91><loc_14></location>In addition to open intellectual property constraints for the source documents, we required that the documents in DocLayNet adhere to a few conditions. Firstly, we kept scanned documents</text>
|
||||
<figure>
|
||||
<location><page_3><loc_14><loc_72><loc_43><loc_88></location>
|
||||
<caption>Figure 2: Distribution of DocLayNet pages across document categories.</caption>
|
||||
</figure>
|
||||
<text><location><page_3><loc_9><loc_54><loc_48><loc_64></location>to a minimum, since they introduce difficulties in annotation (see Section 4). As a second condition, we focussed on medium to large documents ( > 10 pages) with technical content, dense in complex tables, figures, plots and captions. Such documents carry a lot of information value, but are often hard to analyse with high accuracy due to their challenging layouts. Counterexamples of documents not included in the dataset are receipts, invoices, hand-written documents or photographs showing "text in the wild".</text>
|
||||
<text><location><page_3><loc_9><loc_36><loc_48><loc_53></location>The pages in DocLayNet can be grouped into six distinct categories, namely Financial Reports , Manuals , Scientific Articles , Laws & Regulations , Patents and Government Tenders . Each document category was sourced from various repositories. For example, Financial Reports contain both free-style format annual reports 2 which expose company-specific, artistic layouts as well as the more formal SEC filings. The two largest categories ( Financial Reports and Manuals ) contain a large amount of free-style layouts in order to obtain maximum variability. In the other four categories, we boosted the variability by mixing documents from independent providers, such as different government websites or publishers. In Figure 2, we show the document categories contained in DocLayNet with their respective sizes.</text>
|
||||
<text><location><page_3><loc_9><loc_23><loc_48><loc_35></location>We did not control the document selection with regard to language. The vast majority of documents contained in DocLayNet (close to 95%) are published in English language. However, DocLayNet also contains a number of documents in other languages such as German (2.5%), French (1.0%) and Japanese (1.0%). While the document language has negligible impact on the performance of computer vision methods such as object detection and segmentation models, it might prove challenging for layout analysis methods which exploit textual features.</text>
|
||||
<text><location><page_3><loc_9><loc_14><loc_48><loc_23></location>To ensure that future benchmarks in the document-layout analysis community can be easily compared, we have split up DocLayNet into pre-defined train-, test- and validation-sets. In this way, we can avoid spurious variations in the evaluation scores due to random splitting in train-, test- and validation-sets. We also ensured that less frequent labels are represented in train and test sets in equal proportions.</text>
|
||||
<text><location><page_3><loc_52><loc_80><loc_91><loc_89></location>Table 1 shows the overall frequency and distribution of the labels among the different sets. Importantly, we ensure that subsets are only split on full-document boundaries. This prevents pages of the same document from being spread over the train, test and validation sets, which can give an undesired evaluation advantage to models and lead to overestimation of their prediction accuracy. We will show the impact of this decision in Section 5.</text>
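Splitting on full-document boundaries, as described above, can be sketched as grouping pages by their source document before assigning splits. Illustrative code only, not the DocLayNet tooling; the `doc_id` field is an assumed name.

```python
import random
from collections import defaultdict

def split_by_document(pages, ratios=(0.8, 0.1, 0.1), seed=0):
    """Assign whole documents (never individual pages) to train/test/val,
    so pages of one document never leak across splits."""
    by_doc = defaultdict(list)
    for page in pages:                  # page is e.g. {"doc_id": ..., ...}
        by_doc[page["doc_id"]].append(page)
    doc_ids = sorted(by_doc)
    random.Random(seed).shuffle(doc_ids)
    n = len(doc_ids)
    cut1, cut2 = int(ratios[0] * n), int((ratios[0] + ratios[1]) * n)
    splits = {"train": doc_ids[:cut1],
              "test": doc_ids[cut1:cut2],
              "val": doc_ids[cut2:]}
    return {name: [p for d in ids for p in by_doc[d]]
            for name, ids in splits.items()}
```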
|
||||
<text><location><page_3><loc_52><loc_66><loc_91><loc_79></location>In order to accommodate the different types of models currently in use by the community, we provide DocLayNet in an augmented COCO format [16]. This entails the standard COCO ground-truth file (in JSON format) with the associated page images (in PNG format, 1025 × 1025 pixels). Furthermore, custom fields have been added to each COCO record to specify document category, original document filename and page number. In addition, we also provide the original PDF pages, as well as sidecar files containing parsed PDF text and text-cell coordinates (in JSON). All additional files are linked to the primary page images by their matching filenames.</text>
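The augmented COCO layout described above can be read with plain JSON tooling. The custom field names and the sidecar file layout in the sketch below are assumptions for illustration, not the exact DocLayNet schema.

```python
import json
from pathlib import Path

def load_pages(coco_json: Path, sidecar_dir: Path):
    """Read the COCO ground-truth file and, for each page image, the matching
    sidecar JSON with parsed PDF text cells (linked by filename)."""
    coco = json.loads(coco_json.read_text())
    pages = []
    for image in coco["images"]:
        record = {
            "file_name": image["file_name"],  # standard COCO field
            # the custom fields below use assumed names, see the paper text
            "doc_category": image.get("doc_category"),
            "original_filename": image.get("original_filename"),
            "page_no": image.get("page_no"),
        }
        sidecar = sidecar_dir / (Path(image["file_name"]).stem + ".json")
        if sidecar.exists():
            record["text_cells"] = json.loads(sidecar.read_text())
        pages.append(record)
    return pages
```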
|
||||
<text><location><page_3><loc_52><loc_26><loc_91><loc_65></location>Despite being cost-intense and far less scalable than automation, human annotation has several benefits over automated groundtruth generation. The first and most obvious reason to leverage human annotations is the freedom to annotate any type of document without requiring a programmatic source. For most PDF documents, the original source document is not available. The latter is not a hard constraint with human annotation, but it is for automated methods. A second reason to use human annotations is that the latter usually provide a more natural interpretation of the page layout. The human-interpreted layout can significantly deviate from the programmatic layout used in typesetting. For example, "invisible" tables might be used solely for aligning text paragraphs on columns. Such typesetting tricks might be interpreted by automated methods incorrectly as an actual table, while the human annotation will interpret it correctly as Text or other styles. The same applies to multi-line text elements, when authors decided to space them as "invisible" list elements without bullet symbols. A third reason to gather ground-truth through human annotation is to estimate a "natural" upper bound on the segmentation accuracy. As we will show in Section 4, certain documents featuring complex layouts can have different but equally acceptable layout interpretations. This natural upper bound for segmentation accuracy can be found by annotating the same pages multiple times by different people and evaluating the inter-annotator agreement. Such a baseline consistency evaluation is very useful to define expectations for a good target accuracy in trained deep neural network models and avoid overfitting (see Table 1). On the flip side, achieving high annotation consistency proved to be a key challenge in human annotation, as we outline in Section 4.</text>
|
||||
<section_header_level_1><location><page_3><loc_52><loc_22><loc_77><loc_23></location>4 ANNOTATION CAMPAIGN</section_header_level_1>
|
||||
<text><location><page_3><loc_52><loc_11><loc_91><loc_20></location>The annotation campaign was carried out in four phases. In phase one, we identified and prepared the data sources for annotation. In phase two, we determined the class labels and how annotations should be done on the documents in order to obtain maximum consistency. The latter was guided by a detailed requirement analysis and exhaustive experiments. In phase three, we trained the annotation staff and performed exams for quality assurance. In phase four,</text>
|
||||
<table>
|
||||
<location><page_4><loc_16><loc_63><loc_84><loc_83></location>
|
||||
<caption>Table 1: DocLayNet dataset overview. Along with the frequency of each class label, we present the relative occurrence (as % of row "Total") in the train, test and validation sets. The inter-annotator agreement is computed as the mAP@0.5-0.95 metric between pairwise annotations from the triple-annotated pages, from which we obtain accuracy ranges.</caption>
|
||||
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>% of Total</col_2><col_3><col_header>% of Total</col_3><col_4><col_header>% of Total</col_4><col_5><col_header>% of Total</col_5><col_6><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_6><col_7><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_7><col_8><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_8><col_9><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_9><col_10><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_10><col_11><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_11></row_0>
|
||||
<row_1><col_0><col_header>class label</col_0><col_1><col_header>Count</col_1><col_2><col_header>Train</col_2><col_3><col_header>Test</col_3><col_4><col_header>Val</col_4><col_5><col_header>All</col_5><col_6><col_header>Fin</col_6><col_7><col_header>Man</col_7><col_8><col_header>Sci</col_8><col_9><col_header>Law</col_9><col_10><col_header>Pat</col_10><col_11><col_header>Ten</col_11></row_1>
|
||||
<row_2><col_0><row_header>Caption</col_0><col_1><body>22524</col_1><col_2><body>2.04</col_2><col_3><body>1.77</col_3><col_4><body>2.32</col_4><col_5><body>84-89</col_5><col_6><body>40-61</col_6><col_7><body>86-92</col_7><col_8><body>94-99</col_8><col_9><body>95-99</col_9><col_10><body>69-78</col_10><col_11><body>n/a</col_11></row_2>
|
||||
<row_3><col_0><row_header>Footnote</col_0><col_1><body>6318</col_1><col_2><body>0.60</col_2><col_3><body>0.31</col_3><col_4><body>0.58</col_4><col_5><body>83-91</col_5><col_6><body>n/a</col_6><col_7><body>100</col_7><col_8><body>62-88</col_8><col_9><body>85-94</col_9><col_10><body>n/a</col_10><col_11><body>82-97</col_11></row_3>
|
||||
<row_4><col_0><row_header>Formula</col_0><col_1><body>25027</col_1><col_2><body>2.25</col_2><col_3><body>1.90</col_3><col_4><body>2.96</col_4><col_5><body>83-85</col_5><col_6><body>n/a</col_6><col_7><body>n/a</col_7><col_8><body>84-87</col_8><col_9><body>86-96</col_9><col_10><body>n/a</col_10><col_11><body>n/a</col_11></row_4>
|
||||
<row_5><col_0><row_header>List-item</col_0><col_1><body>185660</col_1><col_2><body>17.19</col_2><col_3><body>13.34</col_3><col_4><body>15.82</col_4><col_5><body>87-88</col_5><col_6><body>74-83</col_6><col_7><body>90-92</col_7><col_8><body>97-97</col_8><col_9><body>81-85</col_9><col_10><body>75-88</col_10><col_11><body>93-95</col_11></row_5>
|
||||
<row_6><col_0><row_header>Page-footer</col_0><col_1><body>70878</col_1><col_2><body>6.51</col_2><col_3><body>5.58</col_3><col_4><body>6.00</col_4><col_5><body>93-94</col_5><col_6><body>88-90</col_6><col_7><body>95-96</col_7><col_8><body>100</col_8><col_9><body>92-97</col_9><col_10><body>100</col_10><col_11><body>96-98</col_11></row_6>
|
||||
<row_7><col_0><row_header>Page-header</col_0><col_1><body>58022</col_1><col_2><body>5.10</col_2><col_3><body>6.70</col_3><col_4><body>5.06</col_4><col_5><body>85-89</col_5><col_6><body>66-76</col_6><col_7><body>90-94</col_7><col_8><body>98-100</col_8><col_9><body>91-92</col_9><col_10><body>97-99</col_10><col_11><body>81-86</col_11></row_7>
|
||||
<row_8><col_0><row_header>Picture</col_0><col_1><body>45976</col_1><col_2><body>4.21</col_2><col_3><body>2.78</col_3><col_4><body>5.31</col_4><col_5><body>69-71</col_5><col_6><body>56-59</col_6><col_7><body>82-86</col_7><col_8><body>69-82</col_8><col_9><body>80-95</col_9><col_10><body>66-71</col_10><col_11><body>59-76</col_11></row_8>
|
||||
<row_9><col_0><row_header>Section-header</col_0><col_1><body>142884</col_1><col_2><body>12.60</col_2><col_3><body>15.77</col_3><col_4><body>12.85</col_4><col_5><body>83-84</col_5><col_6><body>76-81</col_6><col_7><body>90-92</col_7><col_8><body>94-95</col_8><col_9><body>87-94</col_9><col_10><body>69-73</col_10><col_11><body>78-86</col_11></row_9>
<row_10><col_0><row_header>Table</col_0><col_1><body>34733</col_1><col_2><body>3.20</col_2><col_3><body>2.27</col_3><col_4><body>3.60</col_4><col_5><body>77-81</col_5><col_6><body>75-80</col_6><col_7><body>83-86</col_7><col_8><body>98-99</col_8><col_9><body>58-80</col_9><col_10><body>79-84</col_10><col_11><body>70-85</col_11></row_10>
<row_11><col_0><row_header>Text</col_0><col_1><body>510377</col_1><col_2><body>45.82</col_2><col_3><body>49.28</col_3><col_4><body>45.00</col_4><col_5><body>84-86</col_5><col_6><body>81-86</col_6><col_7><body>88-93</col_7><col_8><body>89-93</col_8><col_9><body>87-92</col_9><col_10><body>71-79</col_10><col_11><body>87-95</col_11></row_11>
<row_12><col_0><row_header>Title</col_0><col_1><body>5071</col_1><col_2><body>0.47</col_2><col_3><body>0.30</col_3><col_4><body>0.50</col_4><col_5><body>60-72</col_5><col_6><body>24-63</col_6><col_7><body>50-63</col_7><col_8><body>94-100</col_8><col_9><body>82-96</col_9><col_10><body>68-79</col_10><col_11><body>24-56</col_11></row_12>
<row_13><col_0><row_header>Total</col_0><col_1><body>1107470</col_1><col_2><body>941123</col_2><col_3><body>99816</col_3><col_4><body>66531</col_4><col_5><body>82-83</col_5><col_6><body>71-74</col_6><col_7><body>79-81</col_7><col_8><body>89-94</col_8><col_9><body>86-91</col_9><col_10><body>71-76</col_10><col_11><body>68-85</col_11></row_13>
</table>
<figure>
<location><page_4><loc_9><loc_32><loc_48><loc_61></location>
<caption>Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.</caption>
</figure>
<text><location><page_4><loc_9><loc_15><loc_48><loc_20></location>we distributed the annotation workload and performed continuous quality controls. Phases one and two required only a small team of experts. For phases three and four, a group of 40 dedicated annotators was assembled and supervised.</text>
<text><location><page_4><loc_9><loc_11><loc_48><loc_14></location>Phase 1: Data selection and preparation. Our inclusion criteria for documents were described in Section 3. A large effort went into ensuring that all documents are free to use. The data sources</text>
<text><location><page_4><loc_52><loc_53><loc_91><loc_61></location>include publication repositories such as arXiv$^{3}$, government offices, company websites as well as data directory services for financial reports and patents. Scanned documents were excluded wherever possible because they can be rotated or skewed. This would not allow us to perform annotation with rectangular bounding-boxes and therefore complicate the annotation process.</text>
<text><location><page_4><loc_52><loc_36><loc_91><loc_52></location>Preparation work included uploading and parsing the sourced PDF documents in the Corpus Conversion Service (CCS) [22], a cloud-native platform which provides a visual annotation interface and allows for dataset inspection and analysis. The annotation interface of CCS is shown in Figure 3. The desired balance of pages between the different document categories was achieved by selective subsampling of pages with certain desired properties. For example, we made sure to include the title page of each document and bias the remaining page selection to those with figures or tables. The latter was achieved by leveraging pre-trained object detection models from PubLayNet, which helped us estimate how many figures and tables a given page contains.</text>
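A minimal sketch of such a biased page-subsampling step, assuming a hypothetical per-page count of predicted figures and tables ("predicted_objects") coming from a pre-trained layout model; the field names and selection rule are illustrative, not the actual CCS pipeline:

import random

# Illustrative only: always keep title pages, then bias the remaining picks
# towards pages with more predicted figures/tables. "page_no" and
# "predicted_objects" are assumed fields, not the actual CCS schema.
def subsample_pages(pages, n_pages, seed=0):
    rng = random.Random(seed)
    chosen = [p for p in pages if p["page_no"] == 1]
    rest = [p for p in pages if p["page_no"] != 1]
    weights = [1 + p["predicted_objects"] for p in rest]
    while len(chosen) < n_pages and rest:
        idx = rng.choices(range(len(rest)), weights=weights, k=1)[0]
        chosen.append(rest.pop(idx))
        weights.pop(idx)
    return chosen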
<text><location><page_4><loc_52><loc_12><loc_91><loc_36></location>Phase 2: Label selection and guideline. We reviewed the collected documents and identified the most common structural features they exhibit. This was achieved by identifying recurrent layout elements and led us to the definition of 11 distinct class labels. These 11 class labels are Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Critical factors that were considered for the choice of these class labels were (1) the overall occurrence of the label, (2) the specificity of the label, (3) recognisability on a single page (i.e. no need for context from previous or next page) and (4) overall coverage of the page. Specificity ensures that the choice of label is not ambiguous, while coverage ensures that all meaningful items on a page can be annotated. We refrained from class labels that are very specific to a document category, such as Abstract in the Scientific Articles category. We also avoided class labels that are tightly linked to the semantics of the text. Labels such as Author and Affiliation , as seen in DocBank, are often only distinguishable by discriminating on</text>
<text><location><page_5><loc_9><loc_87><loc_48><loc_89></location>the textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category.</text>
<text><location><page_5><loc_9><loc_69><loc_48><loc_86></location>At first sight, the task of visual document-layout interpretation appears intuitive enough to obtain plausible annotations in most cases. However, during early trial-runs in the core team, we observed many cases in which annotators use different annotation styles, especially for documents with challenging layouts. For example, if a figure is presented with subfigures, one annotator might draw a single figure bounding-box, while another might annotate each subfigure separately. The same applies for lists, where one might annotate all list items in one block or each list item separately. In essence, we observed that challenging layouts would be annotated in different but plausible ways. To illustrate this, we show in Figure 4 multiple examples of plausible but inconsistent annotations on the same pages.</text>
<text><location><page_5><loc_9><loc_57><loc_48><loc_68></location>Obviously, this inconsistency in annotations is not desirable for datasets which are intended to be used for model training. To minimise these inconsistencies, we created a detailed annotation guideline. While perfect consistency across 40 annotation staff members is clearly not possible to achieve, we saw a huge improvement in annotation consistency after the introduction of our annotation guideline. A few selected, non-trivial highlights of the guideline are:</text>
<unordered_list>
<list_item><location><page_5><loc_11><loc_51><loc_48><loc_56></location>(1) Every list-item is an individual object instance with class label List-item . This definition is different from PubLayNet and DocBank, where all list-items are grouped together into one List object.</list_item>
<list_item><location><page_5><loc_11><loc_45><loc_48><loc_50></location>(2) A List-item is a paragraph with hanging indentation. Single-line elements can qualify as List-item if the neighbour elements expose hanging indentation. Bullet or enumeration symbols are not a requirement.</list_item>
<list_item><location><page_5><loc_11><loc_42><loc_48><loc_45></location>(3) For every Caption , there must be exactly one corresponding Picture or Table .</list_item>
<list_item><location><page_5><loc_11><loc_40><loc_48><loc_42></location>(4) Connected sub-pictures are grouped together in one Picture object.</list_item>
<list_item><location><page_5><loc_11><loc_38><loc_43><loc_39></location>(5) Formula numbers are included in a Formula object.</list_item>
<list_item><location><page_5><loc_11><loc_34><loc_48><loc_38></location>(6) Emphasised text (e.g. in italic or bold) at the beginning of a paragraph is not considered a Section-header , unless it appears exclusively on its own line.</list_item>
<text><loc_259><loc_106><loc_457><loc_139>All aspects outlined above are detailed in Section 3. In Section 4, we will elaborate on how we designed and executed this large-scale human annotation campaign. We will also share key insights and lessons learned that might prove helpful for other parties planning to set up annotation campaigns.</text>
<text><loc_260><loc_141><loc_457><loc_194>In Section 5, we will present baseline accuracy numbers for a variety of object detection methods (Faster R-CNN, Mask R-CNN and YOLOv5) trained on DocLayNet. We further show how the model performance is impacted by varying the DocLayNet dataset size, reducing the label set and modifying the train/test-split. Last but not least, we compare the performance of models trained on PubLayNet, DocBank and DocLayNet and demonstrate that a model trained on DocLayNet provides overall more robust layout recovery.</text>
<section_header_level_1><loc_260><loc_203><loc_345><loc_209>2 RELATED WORK</section_header_level_1>
<text><loc_259><loc_219><loc_457><loc_293>While early approaches in document-layout analysis used rule-based algorithms and heuristics [8], the problem has lately been addressed with deep learning methods. The most common approach is to leverage object detection models [9-15]. In the last decade, the accuracy and speed of these models have increased dramatically. Furthermore, most state-of-the-art object detection methods can be trained and applied with very little work, thanks to a standardisation effort of the ground-truth data format [16] and common deep-learning frameworks [17]. Reference data sets such as PubLayNet [6] and DocBank provide their data in the commonly accepted COCO format [16].</text>
<text><loc_260><loc_295><loc_457><loc_348>Lately, new types of ML models for document-layout analysis have emerged in the community [18-21]. These models do not approach the problem of layout analysis purely based on an image representation of the page, as computer vision methods do. Instead, they combine the text tokens and image representation of a page in order to obtain a segmentation. While the reported accuracies appear to be promising, a broadly accepted data format which links geometric and textual features has yet to be established.</text>
<section_header_level_1><loc_260><loc_357><loc_390><loc_363>3 THE DOCLAYNET DATASET</section_header_level_1>
<text><loc_260><loc_373><loc_457><loc_426>DocLayNet contains 80863 PDF pages. Among these, 7059 carry two instances of human annotations, and 1591 carry three. This amounts to 91104 total annotation instances. The annotations provide layout information in the shape of labeled, rectangular bounding-boxes. We define 11 distinct labels for layout features, namely Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Our reasoning for picking this particular label set is detailed in Section 4.</text>
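As a quick sanity check of these figures, assuming each page contributes one annotation instance, plus one extra per double-annotated page and two extra per triple-annotated page:

# Each page is annotated at least once; doubly annotated pages add one extra
# instance and triply annotated pages add two.
pages_total, pages_double, pages_triple = 80_863, 7_059, 1_591
instances = pages_total + pages_double + 2 * pages_triple
assert instances == 91_104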
<text><loc_260><loc_428><loc_456><loc_447>In addition to open intellectual property constraints for the source documents, we required that the documents in DocLayNet adhere to a few conditions. Firstly, we kept scanned documents</text>
<page_break>
<page_header><loc_44><loc_38><loc_284><loc_43>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</page_header>
<page_header><loc_299><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA</page_header>
<picture><loc_72><loc_59><loc_215><loc_139><caption><loc_44><loc_149><loc_240><loc_161>Figure 2: Distribution of DocLayNet pages across document categories.</caption></picture>
<text><loc_44><loc_178><loc_240><loc_232>to a minimum, since they introduce difficulties in annotation (see Section 4). As a second condition, we focussed on medium to large documents ( > 10 pages) with technical content, dense in complex tables, figures, plots and captions. Such documents carry a lot of information value, but are often hard to analyse with high accuracy due to their challenging layouts. Counterexamples of documents not included in the dataset are receipts, invoices, hand-written documents or photographs showing "text in the wild".</text>
<text><loc_44><loc_233><loc_241><loc_322>The pages in DocLayNet can be grouped into six distinct categories, namely Financial Reports , Manuals , Scientific Articles , Laws & Regulations , Patents and Government Tenders . Each document category was sourced from various repositories. For example, Financial Reports contain both free-style format annual reports 2 which expose company-specific, artistic layouts as well as the more formal SEC filings. The two largest categories ( Financial Reports and Manuals ) contain a large amount of free-style layouts in order to obtain maximum variability. In the other four categories, we boosted the variability by mixing documents from independent providers, such as different government websites or publishers. In Figure 2, we show the document categories contained in DocLayNet with their respective sizes.</text>
<text><loc_44><loc_323><loc_241><loc_384>We did not control the document selection with regard to language. The vast majority of documents contained in DocLayNet (close to 95%) are published in English language. However, DocLayNet also contains a number of documents in other languages such as German (2.5%), French (1.0%) and Japanese (1.0%). While the document language has negligible impact on the performance of computer vision methods such as object detection and segmentation models, it might prove challenging for layout analysis methods which exploit textual features.</text>
<text><loc_44><loc_385><loc_241><loc_432>To ensure that future benchmarks in the document-layout analysis community can be easily compared, we have split up DocLayNet into pre-defined train-, test- and validation-sets. In this way, we can avoid spurious variations in the evaluation scores due to random splitting in train-, test- and validation-sets. We also ensured that less frequent labels are represented in train and test sets in equal proportions.</text>
<footnote><loc_44><loc_443><loc_160><loc_447>$^{2}$e.g. AAPL from https://www.annualreports.com/</footnote>
<text><loc_259><loc_55><loc_457><loc_102>Table 1 shows the overall frequency and distribution of the labels among the different sets. Importantly, we ensure that subsets are only split on full-document boundaries. This prevents pages of the same document from being spread over the train, test and validation sets, which can give an undesired evaluation advantage to models and lead to overestimation of their prediction accuracy. We will show the impact of this decision in Section 5.</text>
<text><loc_260><loc_104><loc_456><loc_171>In order to accommodate the different types of models currently in use by the community, we provide DocLayNet in an augmented COCO format [16]. This entails the standard COCO ground-truth file (in JSON format) with the associated page images (in PNG format, 1025 × 1025 pixels). Furthermore, custom fields have been added to each COCO record to specify document category, original document filename and page number. In addition, we also provide the original PDF pages, as well as sidecar files containing parsed PDF text and text-cell coordinates (in JSON). All additional files are linked to the primary page images by their matching filenames.</text>
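A minimal sketch of how one might read this augmented COCO ground truth; the custom field names ("doc_category", "page_no") and the folder layout are assumptions for illustration, not the published schema:

import json
from pathlib import Path

# Sketch only: read the COCO ground-truth file and locate the associated
# page image and text-cell sidecar file by the shared filename stem.
coco = json.loads(Path("COCO/train.json").read_text())
for image in coco["images"][:3]:
    stem = Path(image["file_name"]).stem
    page_png = Path("PNG") / f"{stem}.png"      # 1025 x 1025 page image
    cells_json = Path("JSON") / f"{stem}.json"  # parsed PDF text-cells
    print(image.get("doc_category"), image.get("page_no"), page_png, cells_json)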
<text><loc_259><loc_173><loc_457><loc_372>Despite being cost-intensive and far less scalable than automation, human annotation has several benefits over automated ground-truth generation. The first and most obvious reason to leverage human annotations is the freedom to annotate any type of document without requiring a programmatic source. For most PDF documents, the original source document is not available. The latter is not a hard constraint with human annotation, but it is for automated methods. A second reason to use human annotations is that the latter usually provide a more natural interpretation of the page layout. The human-interpreted layout can significantly deviate from the programmatic layout used in typesetting. For example, "invisible" tables might be used solely for aligning text paragraphs on columns. Such typesetting tricks might be interpreted by automated methods incorrectly as an actual table, while the human annotation will interpret it correctly as Text or other styles. The same applies to multi-line text elements, when authors decided to space them as "invisible" list elements without bullet symbols. A third reason to gather ground-truth through human annotation is to estimate a "natural" upper bound on the segmentation accuracy. As we will show in Section 4, certain documents featuring complex layouts can have different but equally acceptable layout interpretations. This natural upper bound for segmentation accuracy can be found by annotating the same pages multiple times by different people and evaluating the inter-annotator agreement. Such a baseline consistency evaluation is very useful to define expectations for a good target accuracy in trained deep neural network models and avoid overfitting (see Table 1). On the flip side, achieving high annotation consistency proved to be a key challenge in human annotation, as we outline in Section 4.</text>
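One way to compute such pairwise agreement, sketched under the assumption that both annotators' boxes are available in COCO format: treat one annotator as ground truth and feed the other's boxes to the standard evaluator as detections with confidence 1.0 (see the COCOeval sketch further below):

# Sketch: convert a second annotator's boxes into the COCO "results" format so
# they can be scored against the first annotator with COCOeval.
def boxes_as_detections(annotations):
    return [
        {
            "image_id": ann["image_id"],
            "category_id": ann["category_id"],
            "bbox": ann["bbox"],  # [x, y, width, height]
            "score": 1.0,         # human boxes carry no confidence, use 1.0
        }
        for ann in annotations
    ]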
<section_header_level_1><loc_260><loc_383><loc_384><loc_390>4 ANNOTATION CAMPAIGN</section_header_level_1>
<text><loc_260><loc_399><loc_457><loc_446>The annotation campaign was carried out in four phases. In phase one, we identified and prepared the data sources for annotation. In phase two, we determined the class labels and how annotations should be done on the documents in order to obtain maximum consistency. The latter was guided by a detailed requirement analysis and exhaustive experiments. In phase three, we trained the annotation staff and performed exams for quality assurance. In phase four,</text>
<page_break>
<page_header><loc_44><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar</page_header>
<otsl><loc_81><loc_87><loc_419><loc_186><ecel><ecel><ched>% of Total<lcel><lcel><lcel><ched>triple inter-annotator mAP @ 0.5-0.95 (%)<lcel><lcel><lcel><lcel><lcel><nl><ched>class label<ched>Count<ched>Train<ched>Test<ched>Val<ched>All<ched>Fin<ched>Man<ched>Sci<ched>Law<ched>Pat<ched>Ten<nl><rhed>Caption<fcel>22524<fcel>2.04<fcel>1.77<fcel>2.32<fcel>84-89<fcel>40-61<fcel>86-92<fcel>94-99<fcel>95-99<fcel>69-78<fcel>n/a<nl><rhed>Footnote<fcel>6318<fcel>0.60<fcel>0.31<fcel>0.58<fcel>83-91<fcel>n/a<fcel>100<fcel>62-88<fcel>85-94<fcel>n/a<fcel>82-97<nl><rhed>Formula<fcel>25027<fcel>2.25<fcel>1.90<fcel>2.96<fcel>83-85<fcel>n/a<fcel>n/a<fcel>84-87<fcel>86-96<fcel>n/a<fcel>n/a<nl><rhed>List-item<fcel>185660<fcel>17.19<fcel>13.34<fcel>15.82<fcel>87-88<fcel>74-83<fcel>90-92<fcel>97-97<fcel>81-85<fcel>75-88<fcel>93-95<nl><rhed>Page-footer<fcel>70878<fcel>6.51<fcel>5.58<fcel>6.00<fcel>93-94<fcel>88-90<fcel>95-96<fcel>100<fcel>92-97<fcel>100<fcel>96-98<nl><rhed>Page-header<fcel>58022<fcel>5.10<fcel>6.70<fcel>5.06<fcel>85-89<fcel>66-76<fcel>90-94<fcel>98-100<fcel>91-92<fcel>97-99<fcel>81-86<nl><rhed>Picture<fcel>45976<fcel>4.21<fcel>2.78<fcel>5.31<fcel>69-71<fcel>56-59<fcel>82-86<fcel>69-82<fcel>80-95<fcel>66-71<fcel>59-76<nl><rhed>Section-header<fcel>142884<fcel>12.60<fcel>15.77<fcel>12.85<fcel>83-84<fcel>76-81<fcel>90-92<fcel>94-95<fcel>87-94<fcel>69-73<fcel>78-86<nl><rhed>Table<fcel>34733<fcel>3.20<fcel>2.27<fcel>3.60<fcel>77-81<fcel>75-80<fcel>83-86<fcel>98-99<fcel>58-80<fcel>79-84<fcel>70-85<nl><rhed>Text<fcel>510377<fcel>45.82<fcel>49.28<fcel>45.00<fcel>84-86<fcel>81-86<fcel>88-93<fcel>89-93<fcel>87-92<fcel>71-79<fcel>87-95<nl><rhed>Title<fcel>5071<fcel>0.47<fcel>0.30<fcel>0.50<fcel>60-72<fcel>24-63<fcel>50-63<fcel>94-100<fcel>82-96<fcel>68-79<fcel>24-56<nl><rhed>Total<fcel>1107470<fcel>941123<fcel>99816<fcel>66531<fcel>82-83<fcel>71-74<fcel>79-81<fcel>89-94<fcel>86-91<fcel>71-76<fcel>68-85<nl><caption><loc_44><loc_54><loc_456><loc_73>Table 1: DocLayNet dataset overview. Along with the frequency of each class label, we present the relative occurrence (as % of row "Total") in the train, test and validation sets. The inter-annotator agreement is computed as the mAP@0.5-0.95 metric between pairwise annotations from the triple-annotated pages, from which we obtain accuracy ranges.</caption></otsl>
<picture><loc_43><loc_196><loc_242><loc_341><caption><loc_44><loc_350><loc_242><loc_383>Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.</caption></picture>
<text><loc_44><loc_400><loc_240><loc_426>we distributed the annotation workload and performed continuous quality controls. Phases one and two required only a small team of experts. For phases three and four, a group of 40 dedicated annotators was assembled and supervised.</text>
<text><loc_44><loc_428><loc_241><loc_447>Phase 1: Data selection and preparation. Our inclusion criteria for documents were described in Section 3. A large effort went into ensuring that all documents are free to use. The data sources</text>
<text><loc_260><loc_197><loc_457><loc_237>include publication repositories such as arXiv$^{3}$, government offices, company websites as well as data directory services for financial reports and patents. Scanned documents were excluded wherever possible because they can be rotated or skewed. This would not allow us to perform annotation with rectangular bounding-boxes and therefore complicate the annotation process.</text>
<text><loc_260><loc_239><loc_457><loc_320>Preparation work included uploading and parsing the sourced PDF documents in the Corpus Conversion Service (CCS) [22], a cloud-native platform which provides a visual annotation interface and allows for dataset inspection and analysis. The annotation interface of CCS is shown in Figure 3. The desired balance of pages between the different document categories was achieved by selective subsampling of pages with certain desired properties. For example, we made sure to include the title page of each document and bias the remaining page selection to those with figures or tables. The latter was achieved by leveraging pre-trained object detection models from PubLayNet, which helped us estimate how many figures and tables a given page contains.</text>
<text><loc_259><loc_321><loc_457><loc_438>Phase 2: Label selection and guideline. We reviewed the collected documents and identified the most common structural features they exhibit. This was achieved by identifying recurrent layout elements and led us to the definition of 11 distinct class labels. These 11 class labels are Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Critical factors that were considered for the choice of these class labels were (1) the overall occurrence of the label, (2) the specificity of the label, (3) recognisability on a single page (i.e. no need for context from previous or next page) and (4) overall coverage of the page. Specificity ensures that the choice of label is not ambiguous, while coverage ensures that all meaningful items on a page can be annotated. We refrained from class labels that are very specific to a document category, such as Abstract in the Scientific Articles category. We also avoided class labels that are tightly linked to the semantics of the text. Labels such as Author and Affiliation , as seen in DocBank, are often only distinguishable by discriminating on</text>
<footnote><loc_260><loc_443><loc_302><loc_448>$^{3}$https://arxiv.org/</footnote>
<page_break>
<page_header><loc_44><loc_38><loc_284><loc_43>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</page_header>
<page_header><loc_299><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA</page_header>
<text><loc_44><loc_55><loc_240><loc_67>the textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category.</text>
<text><loc_44><loc_69><loc_241><loc_157>At first sight, the task of visual document-layout interpretation appears intuitive enough to obtain plausible annotations in most cases. However, during early trial-runs in the core team, we observed many cases in which annotators use different annotation styles, especially for documents with challenging layouts. For example, if a figure is presented with subfigures, one annotator might draw a single figure bounding-box, while another might annotate each subfigure separately. The same applies for lists, where one might annotate all list items in one block or each list item separately. In essence, we observed that challenging layouts would be annotated in different but plausible ways. To illustrate this, we show in Figure 4 multiple examples of plausible but inconsistent annotations on the same pages.</text>
<text><loc_44><loc_159><loc_241><loc_213>Obviously, this inconsistency in annotations is not desirable for datasets which are intended to be used for model training. To minimise these inconsistencies, we created a detailed annotation guideline. While perfect consistency across 40 annotation staff members is clearly not possible to achieve, we saw a huge improvement in annotation consistency after the introduction of our annotation guideline. A few selected, non-trivial highlights of the guideline are:</text>
<unordered_list><list_item><loc_53><loc_220><loc_240><loc_246>(1) Every list-item is an individual object instance with class label List-item . This definition is different from PubLayNet and DocBank, where all list-items are grouped together into one List object.</list_item>
<list_item><loc_53><loc_248><loc_241><loc_274>(2) A List-item is a paragraph with hanging indentation. Single-line elements can qualify as List-item if the neighbour elements expose hanging indentation. Bullet or enumeration symbols are not a requirement.</list_item>
<list_item><loc_53><loc_275><loc_240><loc_288>(3) For every Caption , there must be exactly one corresponding Picture or Table .</list_item>
<list_item><loc_53><loc_289><loc_240><loc_301>(4) Connected sub-pictures are grouped together in one Picture object.</list_item>
<list_item><loc_53><loc_303><loc_216><loc_308>(5) Formula numbers are included in a Formula object.</list_item>
<list_item><loc_53><loc_310><loc_240><loc_329>(6) Emphasised text (e.g. in italic or bold) at the beginning of a paragraph is not considered a Section-header , unless it appears exclusively on its own line.</list_item>
</unordered_list>
<text><location><page_5><loc_9><loc_27><loc_48><loc_33></location>The complete annotation guideline is over 100 pages long and a detailed description is obviously out of scope for this paper. Nevertheless, it will be made publicly available alongside DocLayNet for future reference.</text>
<text><location><page_5><loc_9><loc_11><loc_48><loc_27></location>Phase 3: Training. After a first trial with a small group of people, we realised that providing the annotation guideline and a set of random practice pages did not yield the desired quality level for layout annotation. Therefore we prepared a subset of pages with two different complexity levels, each with a practice and an exam part. 974 pages were reference-annotated by one proficient core team member. Annotation staff were then given the task to annotate the same subsets (blinded from the reference). By comparing the annotations of each staff member with the reference annotations, we could quantify how closely their annotations matched the reference. Only after passing two exam levels with high annotation quality, staff were admitted into the production phase. Practice iterations</text>
<figure>
<location><page_5><loc_52><loc_42><loc_91><loc_89></location>
<caption>Figure 4: Examples of plausible annotation alternatives for the same page. Criteria in our annotation guideline can resolve cases A to C, while the case D remains ambiguous.</caption>
</figure>
<text><location><page_5><loc_52><loc_31><loc_91><loc_34></location>were carried out over a timeframe of 12 weeks, after which 8 of the 40 initially allocated annotators did not pass the bar.</text>
<text><location><page_5><loc_52><loc_10><loc_91><loc_31></location>Phase 4: Production annotation. The previously selected 80K pages were annotated with the defined 11 class labels by 32 annotators. This production phase took around three months to complete. All annotations were created online through CCS, which visualises the programmatic PDF text-cells as an overlay on the page. The page annotations are obtained by drawing rectangular bounding-boxes, as shown in Figure 3. With regard to the annotation practices, we implemented a few constraints and capabilities on the tooling level. First, we only allow non-overlapping, vertically oriented, rectangular boxes. For the large majority of documents, this constraint was sufficient and it speeds up the annotation considerably in comparison with arbitrary segmentation shapes. Second, annotation staff were not able to see each other's annotations. This was enforced by design to avoid any bias in the annotation, which could skew the numbers of the inter-annotator agreement (see Table 1). We wanted</text>
<table>
<location><page_6><loc_10><loc_56><loc_47><loc_75></location>
<caption>Table 2: Prediction performance (mAP@0.5-0.95) of object detection networks on DocLayNet test set. The MRCNN (Mask R-CNN) and FRCNN (Faster R-CNN) models with ResNet-50 or ResNet-101 backbone were trained based on the network architectures from the detectron2 model zoo (Mask R-CNN R50, R101-FPN 3x, Faster R-CNN R101-FPN 3x), with default configurations. The YOLO implementation utilized was YOLOv5x6 [13]. All models were initialised using pre-trained weights from the COCO 2017 dataset.</caption>
<row_0><col_0><body></col_0><col_1><col_header>human</col_1><col_2><col_header>MRCNN</col_2><col_3><col_header>MRCNN</col_3><col_4><col_header>FRCNN</col_4><col_5><col_header>YOLO</col_5></row_0>
<row_1><col_0><body></col_0><col_1><col_header>human</col_1><col_2><col_header>R50</col_2><col_3><col_header>R101</col_3><col_4><col_header>R101</col_4><col_5><col_header>v5x6</col_5></row_1>
<row_2><col_0><row_header>Caption</col_0><col_1><body>84-89</col_1><col_2><body>68.4</col_2><col_3><body>71.5</col_3><col_4><body>70.1</col_4><col_5><body>77.7</col_5></row_2>
<row_3><col_0><row_header>Footnote</col_0><col_1><body>83-91</col_1><col_2><body>70.9</col_2><col_3><body>71.8</col_3><col_4><body>73.7</col_4><col_5><body>77.2</col_5></row_3>
<row_4><col_0><row_header>Formula</col_0><col_1><body>83-85</col_1><col_2><body>60.1</col_2><col_3><body>63.4</col_3><col_4><body>63.5</col_4><col_5><body>66.2</col_5></row_4>
<row_5><col_0><row_header>List-item</col_0><col_1><body>87-88</col_1><col_2><body>81.2</col_2><col_3><body>80.8</col_3><col_4><body>81.0</col_4><col_5><body>86.2</col_5></row_5>
<row_6><col_0><row_header>Page-footer</col_0><col_1><body>93-94</col_1><col_2><body>61.6</col_2><col_3><body>59.3</col_3><col_4><body>58.9</col_4><col_5><body>61.1</col_5></row_6>
<row_7><col_0><row_header>Page-header</col_0><col_1><body>85-89</col_1><col_2><body>71.9</col_2><col_3><body>70.0</col_3><col_4><body>72.0</col_4><col_5><body>67.9</col_5></row_7>
<row_8><col_0><row_header>Picture</col_0><col_1><body>69-71</col_1><col_2><body>71.7</col_2><col_3><body>72.7</col_3><col_4><body>72.0</col_4><col_5><body>77.1</col_5></row_8>
<row_9><col_0><row_header>Section-header</col_0><col_1><body>83-84</col_1><col_2><body>67.6</col_2><col_3><body>69.3</col_3><col_4><body>68.4</col_4><col_5><body>74.6</col_5></row_9>
<row_10><col_0><row_header>Table</col_0><col_1><body>77-81</col_1><col_2><body>82.2</col_2><col_3><body>82.9</col_3><col_4><body>82.2</col_4><col_5><body>86.3</col_5></row_10>
<row_11><col_0><row_header>Text</col_0><col_1><body>84-86</col_1><col_2><body>84.6</col_2><col_3><body>85.8</col_3><col_4><body>85.4</col_4><col_5><body>88.1</col_5></row_11>
<row_12><col_0><row_header>Title</col_0><col_1><body>60-72</col_1><col_2><body>76.7</col_2><col_3><body>80.4</col_3><col_4><body>79.9</col_4><col_5><body>82.7</col_5></row_12>
<row_13><col_0><row_header>All</col_0><col_1><body>82-83</col_1><col_2><body>72.4</col_2><col_3><body>73.5</col_3><col_4><body>73.4</col_4><col_5><body>76.8</col_5></row_13>
</table>
<text><location><page_6><loc_9><loc_27><loc_48><loc_53></location>to avoid this at any cost in order to have clear, unbiased baseline numbers for human document-layout annotation. Third, we introduced the feature of snapping boxes around text segments to obtain a pixel-accurate annotation and again reduce time and effort. The CCS annotation tool automatically shrinks every user-drawn box to the minimum bounding-box around the enclosed text-cells for all purely text-based segments, which excludes only Table and Picture . For the latter, we instructed annotation staff to minimise inclusion of surrounding whitespace while including all graphical lines. A downside of snapping boxes to enclosed text cells is that some wrongly parsed PDF pages cannot be annotated correctly and need to be skipped. Fourth, we established a way to flag pages as rejected for cases where no valid annotation according to the label guidelines could be achieved. Example cases for this would be PDF pages that render incorrectly or contain layouts that are impossible to capture with non-overlapping rectangles. Such rejected pages are not contained in the final dataset. With all these measures in place, experienced annotation staff managed to annotate a single page in a typical timeframe of 20s to 60s, depending on its complexity.</text>
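A minimal sketch of the snapping behaviour described above, with boxes as (x0, y0, x1, y1) tuples; this illustrates the idea and is not the actual CCS implementation:

# Shrink a user-drawn box to the minimal bounding-box of the text-cells it
# fully encloses; if no cell is enclosed, keep the drawn box unchanged.
def snap_to_text_cells(drawn_box, text_cells):
    x0, y0, x1, y1 = drawn_box
    enclosed = [c for c in text_cells
                if c[0] >= x0 and c[1] >= y0 and c[2] <= x1 and c[3] <= y1]
    if not enclosed:
        return drawn_box
    return (min(c[0] for c in enclosed), min(c[1] for c in enclosed),
            max(c[2] for c in enclosed), max(c[3] for c in enclosed))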
<section_header_level_1><location><page_6><loc_9><loc_24><loc_24><loc_26></location>5 EXPERIMENTS</section_header_level_1>
<text><location><page_6><loc_9><loc_10><loc_48><loc_23></location>The primary goal of DocLayNet is to obtain high-quality ML models capable of accurate document-layout analysis on a wide variety of challenging layouts. As discussed in Section 2, object detection models are currently the easiest to use, due to the standardisation of ground-truth data in COCO format [16] and the availability of general frameworks such as detectron2 [17]. Furthermore, baseline numbers in PubLayNet and DocBank were obtained using standard object detection models such as Mask R-CNN and Faster R-CNN. As such, we will relate to these object detection methods in this</text>
<figure>
<location><page_6><loc_53><loc_67><loc_90><loc_89></location>
<caption>Figure 5: Prediction performance (mAP@0.5-0.95) of a Mask R-CNN network with ResNet50 backbone trained on increasing fractions of the DocLayNet dataset. The learning curve flattens around the 80% mark, indicating that increasing the size of the DocLayNet dataset with similar data will not yield significantly better predictions.</caption>
</figure>
<text><location><page_6><loc_52><loc_49><loc_91><loc_52></location>paper and leave the detailed evaluation of more recent methods mentioned in Section 2 for future work.</text>
<text><location><page_6><loc_52><loc_39><loc_91><loc_49></location>In this section, we will present several aspects related to the performance of object detection models on DocLayNet. As in PubLayNet, we will evaluate the quality of their predictions using mean average precision (mAP) with 10 overlap thresholds that range from 0.5 to 0.95 in steps of 0.05 (mAP@0.5-0.95). These scores are computed by leveraging the evaluation code provided by the COCO API [16].</text>
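For reference, the corresponding evaluation with the COCO API (pycocotools) looks roughly as follows; the file names are placeholders:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# mAP averaged over the ten IoU thresholds 0.50:0.05:0.95 ("bbox" task).
coco_gt = COCO("doclaynet_test_coco.json")           # ground-truth annotations
coco_dt = coco_gt.loadRes("model_predictions.json")  # detections, COCO results format
evaluator = COCOeval(coco_gt, coco_dt, iouType="bbox")
evaluator.evaluate()
evaluator.accumulate()
evaluator.summarize()
print(evaluator.stats[0])                            # mAP@[0.50:0.95]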
<section_header_level_1><location><page_6><loc_52><loc_36><loc_76><loc_37></location>Baselines for Object Detection</section_header_level_1>
<text><location><page_6><loc_52><loc_11><loc_91><loc_35></location>In Table 2, we present baseline experiments (given in mAP) on Mask R-CNN [12], Faster R-CNN [11], and YOLOv5 [13]. Both training and evaluation were performed on RGB images with dimensions of 1025 × 1025 pixels. For training, we only used one annotation in case of redundantly annotated pages. As one can observe, the variation in mAP between the models is rather low, but overall between 6 and 10% lower than the mAP computed from the pairwise human annotations on triple-annotated pages. This gives a good indication that the DocLayNet dataset poses a worthwhile challenge for the research community to close the gap between human recognition and ML approaches. It is interesting to see that Mask R-CNN and Faster R-CNN produce very comparable mAP scores, indicating that pixel-based image segmentation derived from bounding-boxes does not help to obtain better predictions. On the other hand, the more recent Yolov5x model does very well and even out-performs humans on selected labels such as Text , Table and Picture . This is not entirely surprising, as Text , Table and Picture are abundant and the most visually distinctive in a document.</text>
<text><location><page_7><loc_9><loc_84><loc_48><loc_89></location>Table 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.</text>
<table>
<location><page_7><loc_13><loc_63><loc_44><loc_81></location>
<caption>Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.</caption>
<row_0><col_0><col_header>Class-count</col_0><col_1><col_header>11</col_1><col_2><col_header>6</col_2><col_3><col_header>5</col_3><col_4><col_header>4</col_4></row_0>
<row_1><col_0><row_header>Caption</col_0><col_1><body>68</col_1><col_2><body>Text</col_2><col_3><body>Text</col_3><col_4><body>Text</col_4></row_1>
<row_2><col_0><row_header>Footnote</col_0><col_1><body>71</col_1><col_2><body>Text</col_2><col_3><body>Text</col_3><col_4><body>Text</col_4></row_2>
<row_3><col_0><row_header>Formula</col_0><col_1><body>60</col_1><col_2><body>Text</col_2><col_3><body>Text</col_3><col_4><body>Text</col_4></row_3>
<row_4><col_0><row_header>List-item</col_0><col_1><body>81</col_1><col_2><body>Text</col_2><col_3><body>82</col_3><col_4><body>Text</col_4></row_4>
<row_5><col_0><row_header>Page-footer</col_0><col_1><body>62</col_1><col_2><body>62</col_2><col_3><body>-</col_3><col_4><body>-</col_4></row_5>
<row_6><col_0><row_header>Page-header</col_0><col_1><body>72</col_1><col_2><body>68</col_2><col_3><body>-</col_3><col_4><body>-</col_4></row_6>
<row_7><col_0><row_header>Picture</col_0><col_1><body>72</col_1><col_2><body>72</col_2><col_3><body>72</col_3><col_4><body>72</col_4></row_7>
<row_8><col_0><row_header>Section-header</col_0><col_1><body>68</col_1><col_2><body>67</col_2><col_3><body>69</col_3><col_4><body>68</col_4></row_8>
<row_9><col_0><row_header>Table</col_0><col_1><body>82</col_1><col_2><body>83</col_2><col_3><body>82</col_3><col_4><body>82</col_4></row_9>
<row_10><col_0><row_header>Text</col_0><col_1><body>85</col_1><col_2><body>84</col_2><col_3><body>84</col_3><col_4><body>84</col_4></row_10>
<row_11><col_0><row_header>Title</col_0><col_1><body>77</col_1><col_2><body>Sec.-h.</col_2><col_3><body>Sec.-h.</col_3><col_4><body>Sec.-h.</col_4></row_11>
<row_12><col_0><row_header>Overall</col_0><col_1><body>72</col_1><col_2><body>73</col_2><col_3><body>78</col_3><col_4><body>77</col_4></row_12>
</table>
<section_header_level_1><location><page_7><loc_9><loc_58><loc_21><loc_60></location>Learning Curve</section_header_level_1>
<text><location><page_7><loc_9><loc_33><loc_48><loc_58></location>One of the fundamental questions related to any dataset is if it is "large enough". To answer this question for DocLayNet, we performed a data ablation study in which we evaluated a Mask R-CNN model trained on increasing fractions of the DocLayNet dataset. As can be seen in Figure 5, the mAP score rises sharply in the beginning and eventually levels out. To estimate the error-bar on the metrics, we ran the training five times on the entire data-set. This resulted in a 1% error-bar, depicted by the shaded area in Figure 5. In the inset of Figure 5, we show the exact same data-points, but with a logarithmic scale on the x-axis. As is expected, the mAP score increases linearly as a function of the data-size in the inset. The curve ultimately flattens out between the 80% and 100% mark, with the 80% mark falling within the error-bars of the 100% mark. This provides a good indication that the model would not improve significantly by yet increasing the data size. Rather, it would probably benefit more from improved data consistency (as discussed in Section 3), data augmentation methods [23], or the addition of more document categories and styles.</text>
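A sketch of the ablation loop, assuming a caller-supplied train_and_evaluate routine that returns mAP@0.5-0.95 on the fixed test set; this is illustrative, not the training code used for Figure 5:

import random

# Train on growing fractions of a shuffled training set and collect the metric.
def ablation_curve(train_pages, train_and_evaluate,
                   fractions=(0.1, 0.25, 0.5, 0.8, 1.0), seed=0):
    rng = random.Random(seed)
    shuffled = list(train_pages)
    rng.shuffle(shuffled)
    results = {}
    for frac in fractions:
        subset = shuffled[: int(len(shuffled) * frac)]
        results[frac] = train_and_evaluate(subset)  # mAP@0.5-0.95
    return results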
<section_header_level_1><location><page_7><loc_9><loc_30><loc_27><loc_32></location>Impact of Class Labels</section_header_level_1>
<text><location><page_7><loc_9><loc_11><loc_48><loc_30></location>The choice and number of labels can have a significant effect on the overall model performance. Since PubLayNet, DocBank and DocLayNet all have different label sets, it is of particular interest to understand and quantify this influence of the label set on the model performance. We investigate this by either down-mapping labels into more common ones (e.g. Caption → Text ) or excluding them from the annotations entirely. Furthermore, it must be stressed that all mappings and exclusions were performed on the data before model training. In Table 3, we present the mAP scores for a Mask R-CNN R50 network on different label sets. Where a label is down-mapped, we show its corresponding label, otherwise it was excluded. We present three different label sets, with 6, 5 and 4 different labels respectively. The set of 5 labels contains the same labels as PubLayNet. However, due to the different definition of</text>
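A sketch of such a label remapping for the 5-label set of Table 3 (Caption, Footnote and Formula down-mapped to Text, Title to Section-header, Page-footer and Page-header excluded); the per-annotation dict layout is an assumption for illustration:

# Down-map or drop labels before training, matching the 5-label column of Table 3.
DOWN_MAP = {"Caption": "Text", "Footnote": "Text", "Formula": "Text",
            "Title": "Section-header"}
DROP = {"Page-footer", "Page-header"}

def remap_labels(annotations):
    remapped = []
    for ann in annotations:          # each ann is assumed to carry a "label" key
        label = ann["label"]
        if label in DROP:
            continue
        remapped.append({**ann, "label": DOWN_MAP.get(label, label)})
    return remapped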
<table>
<location><page_7><loc_58><loc_61><loc_85><loc_81></location>
<row_0><col_0><body>Class-count</col_0><col_1><col_header>11</col_1><col_2><col_header>11</col_2><col_3><col_header>5</col_3><col_4><col_header>5</col_4></row_0>
<row_1><col_0><body>Split</col_0><col_1><col_header>Doc</col_1><col_2><col_header>Page</col_2><col_3><col_header>Doc</col_3><col_4><col_header>Page</col_4></row_1>
<row_2><col_0><row_header>Caption</col_0><col_1><body>68</col_1><col_2><body>83</col_2><col_3><body></col_3><col_4><body></col_4></row_2>
<row_3><col_0><row_header>Footnote</col_0><col_1><body>71</col_1><col_2><body>84</col_2><col_3><body></col_3><col_4><body></col_4></row_3>
<row_4><col_0><row_header>Formula</col_0><col_1><body>60</col_1><col_2><body>66</col_2><col_3><body></col_3><col_4><body></col_4></row_4>
<row_5><col_0><row_header>List-item</col_0><col_1><body>81</col_1><col_2><body>88</col_2><col_3><body>82</col_3><col_4><body>88</col_4></row_5>
<row_6><col_0><row_header>Page-footer</col_0><col_1><body>62</col_1><col_2><body>89</col_2><col_3><body></col_3><col_4><body></col_4></row_6>
<row_7><col_0><row_header>Page-header</col_0><col_1><body>72</col_1><col_2><body>90</col_2><col_3><body></col_3><col_4><body></col_4></row_7>
<row_8><col_0><row_header>Picture</col_0><col_1><body>72</col_1><col_2><body>82</col_2><col_3><body>72</col_3><col_4><body>82</col_4></row_8>
<row_9><col_0><row_header>Section-header</col_0><col_1><body>68</col_1><col_2><body>83</col_2><col_3><body>69</col_3><col_4><body>83</col_4></row_9>
<row_10><col_0><row_header>Table</col_0><col_1><body>82</col_1><col_2><body>89</col_2><col_3><body>82</col_3><col_4><body>90</col_4></row_10>
<row_11><col_0><row_header>Text</col_0><col_1><body>85</col_1><col_2><body>91</col_2><col_3><body>84</col_3><col_4><body>90</col_4></row_11>
<row_12><col_0><row_header>Title</col_0><col_1><body>77</col_1><col_2><body>81</col_2><col_3><body></col_3><col_4><body></col_4></row_12>
<row_13><col_0><row_header>All</col_0><col_1><body>72</col_1><col_2><body>84</col_2><col_3><body>78</col_3><col_4><body>87</col_4></row_13>
</table>
<text><location><page_7><loc_52><loc_47><loc_91><loc_58></location>lists in PubLayNet (grouped list-items) versus DocLayNet (separate list-items), the label set of size 4 is the closest to PubLayNet, in the assumption that the List is down-mapped to Text in PubLayNet. The results in Table 3 show that the prediction accuracy on the remaining class labels does not change significantly when other classes are merged into them. The overall macro-average improves by around 5%, in particular when Page-footer and Page-header are excluded.</text>
<section_header_level_1><location><page_7><loc_52><loc_44><loc_90><loc_46></location>Impact of Document Split in Train and Test Set</section_header_level_1>
<text><location><page_7><loc_52><loc_25><loc_91><loc_44></location>Many documents in DocLayNet have a unique styling. In order to avoid overfitting on a particular style, we have split the train-, test- and validation-sets of DocLayNet on document boundaries, i.e. every document contributes pages to only one set. To the best of our knowledge, this was not considered in PubLayNet or DocBank. To quantify how this affects model performance, we trained and evaluated a Mask R-CNN R50 model on a modified dataset version. Here, the train-, test- and validation-sets were obtained by a randomised draw over the individual pages. As can be seen in Table 4, the difference in model performance is surprisingly large: page-wise splitting gains ~10% in mAP over the document-wise splitting. Thus, random page-wise splitting of DocLayNet can easily lead to accidental overestimation of model performance and should be avoided.</text>
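A document-wise split can be sketched, for example, with scikit-learn's GroupShuffleSplit, grouping pages by an assumed "doc_name" field so that no document contributes pages to more than one subset:

from sklearn.model_selection import GroupShuffleSplit

# Split pages on document boundaries: all pages of a document stay together.
def split_by_document(pages, test_size=0.1, seed=42):
    groups = [page["doc_name"] for page in pages]
    splitter = GroupShuffleSplit(n_splits=1, test_size=test_size, random_state=seed)
    train_idx, test_idx = next(splitter.split(pages, groups=groups))
    return [pages[i] for i in train_idx], [pages[i] for i in test_idx]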
<section_header_level_1><location><page_7><loc_52><loc_22><loc_68><loc_23></location>Dataset Comparison</section_header_level_1>
<text><location><page_7><loc_52><loc_11><loc_91><loc_21></location>Throughout this paper, we claim that DocLayNet's wider variety of document layouts leads to more robust layout detection models. In Table 5, we provide evidence for that. We trained models on each of the available datasets (PubLayNet, DocBank and DocLayNet) and evaluated them on the test sets of the other datasets. Due to the different label sets and annotation styles, a direct comparison is not possible. Hence, we focussed on the common labels among the datasets. Between PubLayNet and DocLayNet, these are Picture ,</text>
<table>
<location><page_8><loc_12><loc_57><loc_45><loc_78></location>
<caption>Table 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.</caption>
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>Testing on</col_2><col_3><col_header>Testing on</col_3><col_4><col_header>Testing on</col_4></row_0>
<row_1><col_0><col_header>Training on</col_0><col_1><col_header>labels</col_1><col_2><col_header>PLN</col_2><col_3><col_header>DB</col_3><col_4><col_header>DLN</col_4></row_1>
<row_2><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Figure</col_1><col_2><body>96</col_2><col_3><body>43</col_3><col_4><body>23</col_4></row_2>
<row_3><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Sec-header</col_1><col_2><body>87</col_2><col_3><body>-</col_3><col_4><body>32</col_4></row_3>
<row_4><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Table</col_1><col_2><body>95</col_2><col_3><body>24</col_3><col_4><body>49</col_4></row_4>
<row_5><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Text</col_1><col_2><body>96</col_2><col_3><body>-</col_3><col_4><body>42</col_4></row_5>
<row_6><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>total</col_1><col_2><body>93</col_2><col_3><body>34</col_3><col_4><body>30</col_4></row_6>
<row_7><col_0><row_header>DocBank (DB)</col_0><col_1><row_header>Figure</col_1><col_2><body>77</col_2><col_3><body>71</col_3><col_4><body>31</col_4></row_7>
<row_8><col_0><row_header>DocBank (DB)</col_0><col_1><row_header>Table</col_1><col_2><body>19</col_2><col_3><body>65</col_3><col_4><body>22</col_4></row_8>
<row_9><col_0><row_header>DocBank (DB)</col_0><col_1><row_header>total</col_1><col_2><body>48</col_2><col_3><body>68</col_3><col_4><body>27</col_4></row_9>
<row_10><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Figure</col_1><col_2><body>67</col_2><col_3><body>51</col_3><col_4><body>72</col_4></row_10>
<row_11><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Sec-header</col_1><col_2><body>53</col_2><col_3><body>-</col_3><col_4><body>68</col_4></row_11>
<row_12><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Table</col_1><col_2><body>87</col_2><col_3><body>43</col_3><col_4><body>82</col_4></row_12>
<row_13><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Text</col_1><col_2><body>77</col_2><col_3><body>-</col_3><col_4><body>84</col_4></row_13>
<row_14><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>total</col_1><col_2><body>59</col_2><col_3><body>47</col_3><col_4><body>78</col_4></row_14>
</table>
<text><location><page_8><loc_9><loc_44><loc_48><loc_51></location>Section-header , Table and Text . Before training, we either mapped or excluded DocLayNet's other labels as specified in Table 3, and also PubLayNet's List to Text . Note that the different clustering of lists (by list-element vs. whole list objects) naturally decreases the mAP score for Text .</text>
<text><location><page_8><loc_9><loc_26><loc_48><loc_44></location>For comparison of DocBank with DocLayNet, we trained only on Picture and Table clusters of each dataset. We had to exclude Text because successive paragraphs are often grouped together into a single object in DocBank. This paragraph grouping is incompatible with the individual paragraphs of DocLayNet. As can be seen in Table 5, DocLayNet trained models yield better performance compared to the previous datasets. It is noteworthy that the models trained on PubLayNet and DocBank perform very well on their own test set, but have a much lower performance on the foreign datasets. While this also applies to DocLayNet, the difference is far less pronounced. Thus we conclude that DocLayNet trained models are overall more robust and will produce better results for challenging, unseen layouts.</text>
<section_header_level_1><location><page_8><loc_9><loc_22><loc_25><loc_24></location>Example Predictions</section_header_level_1>
<text><location><page_8><loc_9><loc_11><loc_48><loc_22></location>To conclude this section, we illustrate the quality of layout predictions one can expect from DocLayNet-trained models by providing a selection of examples without any further post-processing applied. Figure 6 shows selected layout predictions on pages from the test-set of DocLayNet. Results look decent in general across document categories; however, one can also observe mistakes such as overlapping clusters of different classes, or entirely missing boxes due to low confidence.</text>
<section_header_level_1><location><page_8><loc_52><loc_88><loc_66><loc_89></location>6 CONCLUSION</section_header_level_1>
<text><location><page_8><loc_52><loc_76><loc_91><loc_87></location>In this paper, we presented the DocLayNet dataset. It provides the document conversion and layout analysis research community with a new and challenging dataset to improve and fine-tune novel ML methods on. In contrast to many other datasets, DocLayNet was created by human annotation in order to obtain reliable layout ground-truth on a wide variety of publication and typesetting styles. Including a large proportion of documents outside the scientific publishing domain adds significant value in this respect.</text>
<text><location><page_8><loc_52><loc_64><loc_91><loc_76></location>From the dataset, we have derived on the one hand reference metrics for human performance on document-layout annotation (through double and triple annotations) and on the other hand evaluated the baseline performance of commonly used object detection methods. We also illustrated the impact of various dataset-related aspects on model performance through data-ablation experiments, both from a size and class-label perspective. Last but not least, we compared the accuracy of models trained on other public datasets and showed that DocLayNet trained models are more robust.</text>
<text><location><page_8><loc_52><loc_60><loc_91><loc_64></location>To date, there is still a significant gap between human and ML accuracy on the layout interpretation task, and we hope that this work will inspire the research community to close that gap.</text>
<section_header_level_1><location><page_8><loc_52><loc_56><loc_63><loc_58></location>REFERENCES</section_header_level_1>
<unordered_list>
<list_item><location><page_8><loc_52><loc_53><loc_91><loc_56></location>[1] Max Göbel, Tamir Hassan, Ermelinda Oro, and Giorgio Orsi. Icdar 2013 table competition. In 2013 12th International Conference on Document Analysis and Recognition , pages 1449-1453, 2013.</list_item>
<list_item><location><page_8><loc_52><loc_49><loc_91><loc_53></location>[2] Christian Clausner, Apostolos Antonacopoulos, and Stefan Pletschacher. Icdar2017 competition on recognition of documents with complex layouts rdcl2017. In 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR) , volume 01, pages 1404-1410, 2017.</list_item>
<list_item><location><page_8><loc_52><loc_46><loc_91><loc_49></location>[3] Hervé Déjean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), April 2019. http://sac.founderit.com/.</list_item>
<list_item><location><page_8><loc_52><loc_42><loc_91><loc_46></location>[4] Antonio Jimeno Yepes, Peter Zhong, and Douglas Burdick. Competition on scientific literature parsing. In Proceedings of the International Conference on Document Analysis and Recognition , ICDAR, pages 605-617. LNCS 12824, SpringerVerlag, sep 2021.</list_item>
<list_item><location><page_8><loc_52><loc_38><loc_91><loc_42></location>[5] Logan Markewich, Hao Zhang, Yubin Xing, Navid Lambert-Shirzad, Jiang Zhexin, Roy Lee, Zhi Li, and Seok-Bum Ko. Segmentation for document layout analysis: not dead yet. International Journal on Document Analysis and Recognition (IJDAR) , pages 1-11, 01 2022.</list_item>
<list_item><location><page_8><loc_52><loc_35><loc_91><loc_38></location>[6] Xu Zhong, Jianbin Tang, and Antonio Jimeno-Yepes. Publaynet: Largest dataset ever for document layout analysis. In Proceedings of the International Conference on Document Analysis and Recognition , ICDAR, pages 1015-1022, sep 2019.</list_item>
<list_item><location><page_8><loc_52><loc_30><loc_91><loc_35></location>[7] Minghao Li, Yiheng Xu, Lei Cui, Shaohan Huang, Furu Wei, Zhoujun Li, and Ming Zhou. Docbank: A benchmark dataset for document layout analysis. In Proceedings of the 28th International Conference on Computational Linguistics , COLING, pages 949-960. International Committee on Computational Linguistics, dec 2020.</list_item>
<list_item><location><page_8><loc_52><loc_27><loc_91><loc_30></location>[8] Riaz Ahmad, Muhammad Tanvir Afzal, and M. Qadir. Information extraction from pdf sources based on rule-based system using integrated formats. In SemWebEval@ESWC , 2016.</list_item>
<list_item><location><page_8><loc_52><loc_23><loc_91><loc_27></location>[9] Ross B. Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In IEEE Conference on Computer Vision and Pattern Recognition , CVPR, pages 580-587. IEEE Computer Society, jun 2014.</list_item>
<list_item><location><page_8><loc_52><loc_21><loc_91><loc_23></location>[10] Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision , ICCV, pages 1440-1448. IEEE Computer Society, dec 2015.</list_item>
<list_item><location><page_8><loc_52><loc_18><loc_91><loc_21></location>[11] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE Transactions on Pattern Analysis and Machine Intelligence , 39(6):1137-1149, 2017.</list_item>
<list_item><location><page_8><loc_52><loc_15><loc_91><loc_18></location>[12] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross B. Girshick. Mask R-CNN. In IEEE International Conference on Computer Vision , ICCV, pages 2980-2988. IEEE Computer Society, Oct 2017.</list_item>
<list_item><location><page_8><loc_52><loc_11><loc_91><loc_15></location>[13] Glenn Jocher, Alex Stoken, Ayush Chaurasia, Jirka Borovec, NanoCode012, TaoXie, Yonghye Kwon, Kalen Michael, Liu Changyu, Jiacong Fang, Abhiram V, Laughing, tkianai, yxNONG, Piotr Skalski, Adam Hogan, Jebastin Nadar, imyhxy, Lorenzo Mammana, Alex Wang, Cristi Fati, Diego Montes, Jan Hajek, Laurentiu</list_item>
<text><loc_44><loc_336><loc_241><loc_363>The complete annotation guideline is over 100 pages long and a detailed description is obviously out of scope for this paper. Nevertheless, it will be made publicly available alongside with DocLayNet for future reference.</text>
<text><loc_44><loc_364><loc_241><loc_446>Phase 3: Training. After a first trial with a small group of people, we realised that providing the annotation guideline and a set of random practice pages did not yield the desired quality level for layout annotation. Therefore we prepared a subset of pages with two different complexity levels, each with a practice and an exam part. 974 pages were reference-annotated by one proficient core team member. Annotation staff were then given the task to annotate the same subsets (blinded from the reference). By comparing the annotations of each staff member with the reference annotations, we could quantify how closely their annotations matched the reference. Only after passing two exam levels with high annotation quality, staff were admitted into the production phase. Practice iterations</text>
<picture><loc_258><loc_54><loc_457><loc_290><caption><loc_260><loc_299><loc_457><loc_318>Figure 4: Examples of plausible annotation alternatives for the same page. Criteria in our annotation guideline can resolve cases A to C, while the case D remains ambiguous.</caption></picture>
<text><loc_327><loc_289><loc_389><loc_291>05237a14f2524e3f53c8454b074409d05078038a6a36b770fcc8ec7e540deae0</text>
<text><loc_259><loc_332><loc_456><loc_344>were carried out over a timeframe of 12 weeks, after which 8 of the 40 initially allocated annotators did not pass the bar.</text>
<text><loc_259><loc_346><loc_457><loc_448>Phase 4: Production annotation. The previously selected 80K pages were annotated with the defined 11 class labels by 32 annotators. This production phase took around three months to complete. All annotations were created online through CCS, which visualises the programmatic PDF text-cells as an overlay on the page. The page annotation are obtained by drawing rectangular bounding-boxes, as shown in Figure 3. With regard to the annotation practices, we implemented a few constraints and capabilities on the tooling level. First, we only allow non-overlapping, vertically oriented, rectangular boxes. For the large majority of documents, this constraint was sufficient and it speeds up the annotation considerably in comparison with arbitrary segmentation shapes. Second, annotator staff were not able to see each other's annotations. This was enforced by design to avoid any bias in the annotation, which could skew the numbers of the inter-annotator agreement (see Table 1). We wanted</text>
<page_break>
<page_header><loc_44><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar</page_header>
<otsl><loc_51><loc_124><loc_233><loc_222><ecel><ched>human<ched>MRCNN<lcel><ched>FRCNN<ched>YOLO<nl><ecel><ucel><ched>R50<ched>R101<ched>R101<ched>v5x6<nl><rhed>Caption<fcel>84-89<fcel>68.4<fcel>71.5<fcel>70.1<fcel>77.7<nl><rhed>Footnote<fcel>83-91<fcel>70.9<fcel>71.8<fcel>73.7<fcel>77.2<nl><rhed>Formula<fcel>83-85<fcel>60.1<fcel>63.4<fcel>63.5<fcel>66.2<nl><rhed>List-item<fcel>87-88<fcel>81.2<fcel>80.8<fcel>81.0<fcel>86.2<nl><rhed>Page-footer<fcel>93-94<fcel>61.6<fcel>59.3<fcel>58.9<fcel>61.1<nl><rhed>Page-header<fcel>85-89<fcel>71.9<fcel>70.0<fcel>72.0<fcel>67.9<nl><rhed>Picture<fcel>69-71<fcel>71.7<fcel>72.7<fcel>72.0<fcel>77.1<nl><rhed>Section-header<fcel>83-84<fcel>67.6<fcel>69.3<fcel>68.4<fcel>74.6<nl><rhed>Table<fcel>77-81<fcel>82.2<fcel>82.9<fcel>82.2<fcel>86.3<nl><rhed>Text<fcel>84-86<fcel>84.6<fcel>85.8<fcel>85.4<fcel>88.1<nl><rhed>Title<fcel>60-72<fcel>76.7<fcel>80.4<fcel>79.9<fcel>82.7<nl><rhed>All<fcel>82-83<fcel>72.4<fcel>73.5<fcel>73.4<fcel>76.8<nl><caption><loc_44><loc_55><loc_242><loc_116>Table 2: Prediction performance (mAP@0.5-0.95) of object detection networks on DocLayNet test set. The MRCNN (Mask R-CNN) and FRCNN (Faster R-CNN) models with ResNet-50 or ResNet-101 backbone were trained based on the network architectures from the detectron2 model zoo (Mask R-CNN R50, R101-FPN 3x, Faster R-CNN R101-FPN 3x), with default configurations. The YOLO implementation utilized was YOLOv5x6 [13]. All models were initialised using pre-trained weights from the COCO 2017 dataset.</caption></otsl>
<text><loc_44><loc_234><loc_241><loc_364>to avoid this at any cost in order to have clear, unbiased baseline numbers for human document-layout annotation. Third, we introduced the feature of snapping boxes around text segments to obtain a pixel-accurate annotation and again reduce time and effort. The CCS annotation tool automatically shrinks every user-drawn box to the minimum bounding-box around the enclosed text-cells for all purely text-based segments, which excludes only Table and Picture . For the latter, we instructed annotation staff to minimise inclusion of surrounding whitespace while including all graphical lines. A downside of snapping boxes to enclosed text cells is that some wrongly parsed PDF pages cannot be annotated correctly and need to be skipped. Fourth, we established a way to flag pages as rejected for cases where no valid annotation according to the label guidelines could be achieved. Example cases for this would be PDF pages that render incorrectly or contain layouts that are impossible to capture with non-overlapping rectangles. Such rejected pages are not contained in the final dataset. With all these measures in place, experienced annotation staff managed to annotate a single page in a typical timeframe of 20s to 60s, depending on its complexity.</text>
<section_header_level_1><loc_44><loc_371><loc_120><loc_378>5 EXPERIMENTS</section_header_level_1>
<text><loc_44><loc_387><loc_241><loc_448>The primary goal of DocLayNet is to obtain high-quality ML models capable of accurate document-layout analysis on a wide variety of challenging layouts. As discussed in Section 2, object detection models are currently the easiest to use, due to the standardisation of ground-truth data in COCO format [16] and the availability of general frameworks such as detectron2 [17]. Furthermore, baseline numbers in PubLayNet and DocBank were obtained using standard object detection models such as Mask R-CNN and Faster R-CNN. As such, we will relate to these object detection methods in this</text>
<picture><loc_264><loc_57><loc_452><loc_164><caption><loc_260><loc_176><loc_457><loc_216>Figure 5: Prediction performance (mAP@0.5-0.95) of a Mask R-CNN network with ResNet50 backbone trained on increasing fractions of the DocLayNet dataset. The learning curve flattens around the 80% mark, indicating that increasing the size of the DocLayNet dataset with similar data will not yield significantly better predictions.</caption></picture>
<text><loc_260><loc_242><loc_456><loc_255>paper and leave the detailed evaluation of more recent methods mentioned in Section 2 for future work.</text>
<text><loc_260><loc_256><loc_456><loc_303>In this section, we will present several aspects related to the performance of object detection models on DocLayNet. Similarly as in PubLayNet, we will evaluate the quality of their predictions using mean average precision (mAP) with 10 overlaps that range from 0.5 to 0.95 in steps of 0.05 (mAP@0.5-0.95). These scores are computed by leveraging the evaluation code provided by the COCO API [16].</text>
<section_header_level_1><loc_260><loc_314><loc_381><loc_320>Baselines for Object Detection</section_header_level_1>
<text><loc_260><loc_323><loc_456><loc_446>In Table 2, we present baseline experiments (given in mAP) on Mask R-CNN [12], Faster R-CNN [11], and YOLOv5 [13]. Both training and evaluation were performed on RGB images with dimensions of 1025 × 1025 pixels. For training, we only used one annotation in case of redundantly annotated pages. As one can observe, the variation in mAP between the models is rather low, but overall between 6 and 10% lower than the mAP computed from the pairwise human annotations on triple-annotated pages. This gives a good indication that the DocLayNet dataset poses a worthwhile challenge for the research community to close the gap between human recognition and ML approaches. It is interesting to see that Mask R-CNN and Faster R-CNN produce very comparable mAP scores, indicating that pixel-based image segmentation derived from bounding-boxes does not help to obtain better predictions. On the other hand, the more recent Yolov5x model does very well and even out-performs humans on selected labels such as Text , Table and Picture . This is not entirely surprising, as Text , Table and Picture are abundant and the most visually distinctive in a document.</text>
<page_break>
<page_header><loc_44><loc_38><loc_284><loc_43>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</page_header>
<page_header><loc_299><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA</page_header>
<text><loc_44><loc_55><loc_242><loc_81>Table 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.</text>
<otsl><loc_66><loc_95><loc_218><loc_187><ched>Class-count<ched>11<ched>6<ched>5<ched>4<nl><rhed>Caption<fcel>68<fcel>Text<fcel>Text<fcel>Text<nl><rhed>Footnote<fcel>71<fcel>Text<fcel>Text<fcel>Text<nl><rhed>Formula<fcel>60<fcel>Text<fcel>Text<fcel>Text<nl><rhed>List-item<fcel>81<fcel>Text<fcel>82<fcel>Text<nl><rhed>Page-footer<fcel>62<fcel>62<fcel>-<fcel>-<nl><rhed>Page-header<fcel>72<fcel>68<fcel>-<fcel>-<nl><rhed>Picture<fcel>72<fcel>72<fcel>72<fcel>72<nl><rhed>Section-header<fcel>68<fcel>67<fcel>69<fcel>68<nl><rhed>Table<fcel>82<fcel>83<fcel>82<fcel>82<nl><rhed>Text<fcel>85<fcel>84<fcel>84<fcel>84<nl><rhed>Title<fcel>77<fcel>Sec.-h.<fcel>Sec.-h.<fcel>Sec.-h.<nl><rhed>Overall<fcel>72<fcel>73<fcel>78<fcel>77<nl><caption><loc_260><loc_55><loc_457><loc_81>Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in GLYPH<tildelow> 10% point improvement.</caption></otsl>
<section_header_level_1><loc_44><loc_202><loc_107><loc_208>Learning Curve</section_header_level_1>
<text><loc_43><loc_211><loc_241><loc_334>One of the fundamental questions related to any dataset is if it is "large enough". To answer this question for DocLayNet, we performed a data ablation study in which we evaluated a Mask R-CNN model trained on increasing fractions of the DocLayNet dataset. As can be seen in Figure 5, the mAP score rises sharply in the beginning and eventually levels out. To estimate the error-bar on the metrics, we ran the training five times on the entire data-set. This resulted in a 1% error-bar, depicted by the shaded area in Figure 5. In the inset of Figure 5, we show the exact same data-points, but with a logarithmic scale on the x-axis. As is expected, the mAP score increases linearly as a function of the data-size in the inset. The curve ultimately flattens out between the 80% and 100% mark, with the 80% mark falling within the error-bars of the 100% mark. This provides a good indication that the model would not improve significantly by yet increasing the data size. Rather, it would probably benefit more from improved data consistency (as discussed in Section 3), data augmentation methods [23], or the addition of more document categories and styles.</text>
<section_header_level_1><loc_44><loc_342><loc_134><loc_349>Impact of Class Labels</section_header_level_1>
<text><loc_44><loc_352><loc_241><loc_447>The choice and number of labels can have a significant effect on the overall model performance. Since PubLayNet, DocBank and DocLayNet all have different label sets, it is of particular interest to understand and quantify this influence of the label set on the model performance. We investigate this by either down-mapping labels into more common ones (e.g. Caption → Text ) or excluding them from the annotations entirely. Furthermore, it must be stressed that all mappings and exclusions were performed on the data before model training. In Table 3, we present the mAP scores for a Mask R-CNN R50 network on different label sets. Where a label is down-mapped, we show its corresponding label, otherwise it was excluded. We present three different label sets, with 6, 5 and 4 different labels respectively. The set of 5 labels contains the same labels as PubLayNet. However, due to the different definition of</text>
<otsl><loc_288><loc_95><loc_427><loc_193><fcel>Class-count<ched>11<lcel><ched>5<lcel><nl><fcel>Split<ched>Doc<ched>Page<ched>Doc<ched>Page<nl><rhed>Caption<fcel>68<fcel>83<ecel><ecel><nl><rhed>Footnote<fcel>71<fcel>84<ecel><ecel><nl><rhed>Formula<fcel>60<fcel>66<ecel><ecel><nl><rhed>List-item<fcel>81<fcel>88<fcel>82<fcel>88<nl><rhed>Page-footer<fcel>62<fcel>89<ecel><ecel><nl><rhed>Page-header<fcel>72<fcel>90<ecel><ecel><nl><rhed>Picture<fcel>72<fcel>82<fcel>72<fcel>82<nl><rhed>Section-header<fcel>68<fcel>83<fcel>69<fcel>83<nl><rhed>Table<fcel>82<fcel>89<fcel>82<fcel>90<nl><rhed>Text<fcel>85<fcel>91<fcel>84<fcel>90<nl><rhed>Title<fcel>77<fcel>81<ecel><ecel><nl><rhed>All<fcel>72<fcel>84<fcel>78<fcel>87<nl></otsl>
<text><loc_260><loc_209><loc_457><loc_263>lists in PubLayNet (grouped list-items) versus DocLayNet (separate list-items), the label set of size 4 is the closest to PubLayNet, in the assumption that the List is down-mapped to Text in PubLayNet. The results in Table 3 show that the prediction accuracy on the remaining class labels does not change significantly when other classes are merged into them. The overall macro-average improves by around 5%, in particular when Page-footer and Page-header are excluded.</text>
<section_header_level_1><loc_260><loc_271><loc_449><loc_278>Impact of Document Split in Train and Test Set</section_header_level_1>
<text><loc_259><loc_281><loc_457><loc_376>Many documents in DocLayNet have a unique styling. In order to avoid overfitting on a particular style, we have split the train-, test- and validation-sets of DocLayNet on document boundaries, i.e. every document contributes pages to only one set. To the best of our knowledge, this was not considered in PubLayNet or DocBank. To quantify how this affects model performance, we trained and evaluated a Mask R-CNN R50 model on a modified dataset version. Here, the train-, test- and validation-sets were obtained by a randomised draw over the individual pages. As can be seen in Table 4, the difference in model performance is surprisingly large: pagewise splitting gains ˜ 10% in mAP over the document-wise splitting. Thus, random page-wise splitting of DocLayNet can easily lead to accidental overestimation of model performance and should be avoided.</text>
<section_header_level_1><loc_260><loc_384><loc_342><loc_391>Dataset Comparison</section_header_level_1>
<text><loc_260><loc_394><loc_457><loc_447>Throughout this paper, we claim that DocLayNet's wider variety of document layouts leads to more robust layout detection models. In Table 5, we provide evidence for that. We trained models on each of the available datasets (PubLayNet, DocBank and DocLayNet) and evaluated them on the test sets of the other datasets. Due to the different label sets and annotation styles, a direct comparison is not possible. Hence, we focussed on the common labels among the datasets. Between PubLayNet and DocLayNet, these are Picture ,</text>
<page_break>
<page_header><loc_44><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar</page_header>
<otsl><loc_59><loc_109><loc_225><loc_215><ecel><ecel><ched>Testing on<lcel><lcel><nl><ched>Training on<ched>labels<ched>PLN<ched>DB<ched>DLN<nl><rhed>PubLayNet (PLN)<rhed>Figure<fcel>96<fcel>43<fcel>23<nl><ucel><rhed>Sec-header<fcel>87<fcel>-<fcel>32<nl><ucel><rhed>Table<fcel>95<fcel>24<fcel>49<nl><ucel><rhed>Text<fcel>96<fcel>-<fcel>42<nl><ucel><rhed>total<fcel>93<fcel>34<fcel>30<nl><rhed>DocBank (DB)<rhed>Figure<fcel>77<fcel>71<fcel>31<nl><ucel><rhed>Table<fcel>19<fcel>65<fcel>22<nl><ucel><rhed>total<fcel>48<fcel>68<fcel>27<nl><rhed>DocLayNet (DLN)<rhed>Figure<fcel>67<fcel>51<fcel>72<nl><ucel><rhed>Sec-header<fcel>53<fcel>-<fcel>68<nl><ucel><rhed>Table<fcel>87<fcel>43<fcel>82<nl><ucel><rhed>Text<fcel>77<fcel>-<fcel>84<nl><ucel><rhed>total<fcel>59<fcel>47<fcel>78<nl><caption><loc_44><loc_55><loc_242><loc_95>Table 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.</caption></otsl>
<text><loc_44><loc_247><loc_240><loc_280>Section-header , Table and Text . Before training, we either mapped or excluded DocLayNet's other labels as specified in table 3, and also PubLayNet's List to Text . Note that the different clustering of lists (by list-element vs. whole list objects) naturally decreases the mAP score for Text .</text>
<text><loc_44><loc_281><loc_241><loc_370>For comparison of DocBank with DocLayNet, we trained only on Picture and Table clusters of each dataset. We had to exclude Text because successive paragraphs are often grouped together into a single object in DocBank. This paragraph grouping is incompatible with the individual paragraphs of DocLayNet. As can be seen in Table 5, DocLayNet trained models yield better performance compared to the previous datasets. It is noteworthy that the models trained on PubLayNet and DocBank perform very well on their own test set, but have a much lower performance on the foreign datasets. While this also applies to DocLayNet, the difference is far less pronounced. Thus we conclude that DocLayNet trained models are overall more robust and will produce better results for challenging, unseen layouts.</text>
<section_header_level_1><loc_44><loc_382><loc_127><loc_388>Example Predictions</section_header_level_1>
<text><loc_44><loc_392><loc_241><loc_445>To conclude this section, we illustrate the quality of layout predictions one can expect from DocLayNet-trained models by providing a selection of examples without any further post-processing applied. Figure 6 shows selected layout predictions on pages from the test-set of DocLayNet. Results look decent in general across document categories, however one can also observe mistakes such as overlapping clusters of different classes, or entirely missing boxes due to low confidence.</text>
<section_header_level_1><loc_260><loc_54><loc_331><loc_61>6 CONCLUSION</section_header_level_1>
<text><loc_260><loc_64><loc_457><loc_118>In this paper, we presented the DocLayNet dataset. It provides the document conversion and layout analysis research community a new and challenging dataset to improve and fine-tune novel ML methods on. In contrast to many other datasets, DocLayNet was created by human annotation in order to obtain reliable layout ground-truth on a wide variety of publication- and typesettingstyles. Including a large proportion of documents outside the scientific publishing domain adds significant value in this respect.</text>
<text><loc_260><loc_119><loc_457><loc_180>From the dataset, we have derived on the one hand reference metrics for human performance on document-layout annotation (through double and triple annotations) and on the other hand evaluated the baseline performance of commonly used object detection methods. We also illustrated the impact of various dataset-related aspects on model performance through data-ablation experiments, both from a size and class-label perspective. Last but not least, we compared the accuracy of models trained on other public datasets and showed that DocLayNet trained models are more robust.</text>
<text><loc_259><loc_181><loc_456><loc_201>To date, there is still a significant gap between human and ML accuracy on the layout interpretation task, and we hope that this work will inspire the research community to close that gap.</text>
<section_header_level_1><loc_260><loc_212><loc_316><loc_218>REFERENCES</section_header_level_1>
<unordered_list><list_item><loc_262><loc_220><loc_456><loc_234>[1] Max Göbel, Tamir Hassan, Ermelinda Oro, and Giorgio Orsi. Icdar 2013 table competition. In 2013 12th International Conference on Document Analysis and Recognition , pages 1449-1453, 2013.</list_item>
<list_item><loc_262><loc_235><loc_457><loc_254>[2] Christian Clausner, Apostolos Antonacopoulos, and Stefan Pletschacher. Icdar2017 competition on recognition of documents with complex layouts rdcl2017. In 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR) , volume 01, pages 1404-1410, 2017.</list_item>
<list_item><loc_262><loc_255><loc_456><loc_270>[3] Hervé Déjean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), April 2019. http://sac.founderit.com/.</list_item>
<list_item><loc_262><loc_270><loc_457><loc_290>[4] Antonio Jimeno Yepes, Peter Zhong, and Douglas Burdick. Competition on scientific literature parsing. In Proceedings of the International Conference on Document Analysis and Recognition , ICDAR, pages 605-617. LNCS 12824, SpringerVerlag, sep 2021.</list_item>
<list_item><loc_262><loc_291><loc_457><loc_310>[5] Logan Markewich, Hao Zhang, Yubin Xing, Navid Lambert-Shirzad, Jiang Zhexin, Roy Lee, Zhi Li, and Seok-Bum Ko. Segmentation for document layout analysis: not dead yet. International Journal on Document Analysis and Recognition (IJDAR) , pages 1-11, 01 2022.</list_item>
<list_item><loc_262><loc_311><loc_456><loc_325>[6] Xu Zhong, Jianbin Tang, and Antonio Jimeno-Yepes. Publaynet: Largest dataset ever for document layout analysis. In Proceedings of the International Conference on Document Analysis and Recognition , ICDAR, pages 1015-1022, sep 2019.</list_item>
<list_item><loc_262><loc_326><loc_457><loc_350>[7] Minghao Li, Yiheng Xu, Lei Cui, Shaohan Huang, Furu Wei, Zhoujun Li, and Ming Zhou. Docbank: A benchmark dataset for document layout analysis. In Proceedings of the 28th International Conference on Computational Linguistics , COLING, pages 949-960. International Committee on Computational Linguistics, dec 2020.</list_item>
<list_item><loc_262><loc_351><loc_457><loc_365>[8] Riaz Ahmad, Muhammad Tanvir Afzal, and M. Qadir. Information extraction from pdf sources based on rule-based system using integrated formats. In SemWebEval@ESWC , 2016.</list_item>
<list_item><loc_262><loc_366><loc_457><loc_385>[9] Ross B. Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In IEEE Conference on Computer Vision and Pattern Recognition , CVPR, pages 580-587. IEEE Computer Society, jun 2014.</list_item>
<list_item><loc_260><loc_386><loc_456><loc_395>[10] Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision , ICCV, pages 1440-1448. IEEE Computer Society, dec 2015.</list_item>
<list_item><loc_260><loc_396><loc_456><loc_410>[11] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE Transactions on Pattern Analysis and Machine Intelligence , 39(6):1137-1149, 2017.</list_item>
<list_item><loc_260><loc_411><loc_457><loc_426>[12] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross B. Girshick. Mask R-CNN. In IEEE International Conference on Computer Vision , ICCV, pages 2980-2988. IEEE Computer Society, Oct 2017.</list_item>
<list_item><loc_260><loc_426><loc_457><loc_446>[13] Glenn Jocher, Alex Stoken, Ayush Chaurasia, Jirka Borovec, NanoCode012, TaoXie, Yonghye Kwon, Kalen Michael, Liu Changyu, Jiacong Fang, Abhiram V, Laughing, tkianai, yxNONG, Piotr Skalski, Adam Hogan, Jebastin Nadar, imyhxy, Lorenzo Mammana, Alex Wang, Cristi Fati, Diego Montes, Jan Hajek, Laurentiu</list_item>
</unordered_list>
<figure>
<location><page_9><loc_9><loc_44><loc_91><loc_89></location>
<caption>Text Caption List-Item Formula Table Section-Header Picture Page-Header Page-Footer Title</caption>
</figure>
<text><location><page_9><loc_9><loc_36><loc_91><loc_41></location>Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.</text>
<text><location><page_9><loc_11><loc_31><loc_48><loc_33></location>Diaconu, Mai Thanh Minh, Marc, albinxavi, fatih, oleg, and wanghao yang. ultralytics/yolov5: v6.0 - yolov5n nano models, roboflow integration, tensorflow export, opencv dnn support, October 2021.</text>
<unordered_list>
<list_item><location><page_9><loc_9><loc_28><loc_48><loc_30></location>[14] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. CoRR , abs/2005.12872, 2020.</list_item>
<list_item><location><page_9><loc_9><loc_26><loc_48><loc_27></location>[15] Mingxing Tan, Ruoming Pang, and Quoc V. Le. Efficientdet: Scalable and efficient object detection. CoRR , abs/1911.09070, 2019.</list_item>
<list_item><location><page_9><loc_9><loc_23><loc_48><loc_25></location>[16] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: common objects in context, 2014.</list_item>
<list_item><location><page_9><loc_9><loc_21><loc_48><loc_22></location>[17] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detectron2, 2019.</list_item>
<list_item><location><page_9><loc_9><loc_16><loc_48><loc_20></location>[18] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter W. J. Staar. Robust pdf document conversion using recurrent neural networks. In Proceedings of the 35th Conference on Artificial Intelligence , AAAI, pages 1513715145, feb 2021.</list_item>
<list_item><location><page_9><loc_9><loc_10><loc_48><loc_15></location>[19] Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and Ming Zhou. Layoutlm: Pre-training of text and layout for document image understanding. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 1192-1200, New York, USA, 2020. Association for Computing Machinery.</list_item>
<list_item><location><page_9><loc_52><loc_32><loc_91><loc_33></location>[20] Shoubin Li, Xuyan Ma, Shuaiqun Pan, Jun Hu, Lin Shi, and Qing Wang. Vtlayout: Fusion of visual and text features for document layout analysis, 2021.</list_item>
<list_item><location><page_9><loc_52><loc_29><loc_91><loc_31></location>[21] Peng Zhang, Can Li, Liang Qiao, Zhanzhan Cheng, Shiliang Pu, Yi Niu, and Fei Wu. Vsr: A unified framework for document layout analysis combining vision, semantics and relations, 2021.</list_item>
<list_item><location><page_9><loc_52><loc_25><loc_91><loc_28></location>[22] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 774-782. ACM, 2018.</list_item>
<list_item><location><page_9><loc_52><loc_23><loc_91><loc_24></location>[23] Connor Shorten and Taghi M. Khoshgoftaar. A survey on image data augmentation for deep learning. Journal of Big Data , 6(1):60, 2019.</list_item>
</document>
<page_break>
<page_header><loc_44><loc_38><loc_284><loc_43>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</page_header>
<page_header><loc_299><loc_38><loc_456><loc_43>KDD ’22, August 14-18, 2022, Washington, DC, USA</page_header>
<picture><loc_43><loc_53><loc_455><loc_279><caption><loc_51><loc_279><loc_260><loc_283>Text Caption List-Item Formula Table Section-Header Picture Page-Header Page-Footer Title</caption></picture>
<text><loc_44><loc_293><loc_457><loc_319>Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.</text>
<text><loc_57><loc_333><loc_241><loc_347>Diaconu, Mai Thanh Minh, Marc, albinxavi, fatih, oleg, and wanghao yang. ultralytics/yolov5: v6.0 - yolov5n nano models, roboflow integration, tensorflow export, opencv dnn support, October 2021.</text>
<unordered_list><list_item><loc_44><loc_348><loc_241><loc_362>[14] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. CoRR , abs/2005.12872, 2020.</list_item>
<list_item><loc_44><loc_363><loc_240><loc_372>[15] Mingxing Tan, Ruoming Pang, and Quoc V. Le. Efficientdet: Scalable and efficient object detection. CoRR , abs/1911.09070, 2019.</list_item>
<list_item><loc_44><loc_373><loc_241><loc_387>[16] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: common objects in context, 2014.</list_item>
<list_item><loc_44><loc_388><loc_241><loc_397>[17] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detectron2, 2019.</list_item>
<list_item><loc_44><loc_398><loc_241><loc_422>[18] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter W. J. Staar. Robust pdf document conversion using recurrent neural networks. In Proceedings of the 35th Conference on Artificial Intelligence , AAAI, pages 1513715145, feb 2021.</list_item>
<list_item><loc_44><loc_423><loc_241><loc_448>[19] Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and Ming Zhou. Layoutlm: Pre-training of text and layout for document image understanding. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 1192-1200, New York, USA, 2020. Association for Computing Machinery.</list_item>
<list_item><loc_260><loc_333><loc_457><loc_342>[20] Shoubin Li, Xuyan Ma, Shuaiqun Pan, Jun Hu, Lin Shi, and Qing Wang. Vtlayout: Fusion of visual and text features for document layout analysis, 2021.</list_item>
<list_item><loc_260><loc_343><loc_457><loc_357>[21] Peng Zhang, Can Li, Liang Qiao, Zhanzhan Cheng, Shiliang Pu, Yi Niu, and Fei Wu. Vsr: A unified framework for document layout analysis combining vision, semantics and relations, 2021.</list_item>
<list_item><loc_260><loc_358><loc_457><loc_377>[22] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 774-782. ACM, 2018.</list_item>
<list_item><loc_260><loc_378><loc_457><loc_387>[23] Connor Shorten and Taghi M. Khoshgoftaar. A survey on image data augmentation for deep learning. Journal of Big Data , 6(1):60, 2019.</list_item>
</unordered_list>
</doctag>
@ -1,19 +1,10 @@
<document>
<text><location><page_1><loc_22><loc_81><loc_79><loc_85></location>order to compute the TED score. Inference timing results for all experiments were obtained from the same machine on a single core with AMD EPYC 7763 CPU @2.45 GHz.</text>
<section_header_level_1><location><page_1><loc_22><loc_77><loc_52><loc_79></location>5.1 Hyper Parameter Optimization</section_header_level_1>
<text><location><page_1><loc_22><loc_68><loc_79><loc_77></location>We have chosen the PubTabNet data set to perform HPO, since it includes a highly diverse set of tables. Also we report TED scores separately for simple and complex tables (tables with cell spans). Results are presented in Table. 1. It is evident that with OTSL, our model achieves the same TED score and slightly better mAP scores in comparison to HTML. However OTSL yields a 2x speed up in the inference runtime over HTML.</text>
<table>
<location><page_1><loc_23><loc_41><loc_78><loc_57></location>
<caption>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption>
<row_0><col_0><col_header>#</col_0><col_1><col_header>#</col_1><col_2><col_header>Language</col_2><col_3><col_header>TEDs</col_3><col_4><col_header>TEDs</col_4><col_5><col_header>TEDs</col_5><col_6><col_header>mAP</col_6><col_7><col_header>Inference</col_7></row_0>
<row_1><col_0><col_header>enc-layers</col_0><col_1><col_header>dec-layers</col_1><col_2><col_header>Language</col_2><col_3><col_header>simple</col_3><col_4><col_header>complex</col_4><col_5><col_header>all</col_5><col_6><col_header>(0.75)</col_6><col_7><col_header>time (secs)</col_7></row_1>
<row_2><col_0><body>6</col_0><col_1><body>6</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.965 0.969</col_3><col_4><body>0.934 0.927</col_4><col_5><body>0.955 0.955</col_5><col_6><body>0.88 0.857</col_6><col_7><body>2.73 5.39</col_7></row_2>
<row_3><col_0><body>4</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.938</col_3><col_4><body>0.904</col_4><col_5><body>0.927</col_5><col_6><body>0.853</col_6><col_7><body>1.97</col_7></row_3>
<row_4><col_0><body></col_0><col_1><body></col_1><col_2><body>OTSL</col_2><col_3><body>0.952 0.923</col_3><col_4><body>0.909</col_4><col_5><body>0.938</col_5><col_6><body>0.843</col_6><col_7><body>3.77</col_7></row_4>
<row_5><col_0><body>2</col_0><col_1><body>4</col_1><col_2><body>HTML</col_2><col_3><body>0.945</col_3><col_4><body>0.897 0.901</col_4><col_5><body>0.915 0.931</col_5><col_6><body>0.859 0.834</col_6><col_7><body>1.91 3.81</col_7></row_5>
<row_6><col_0><body>4</col_0><col_1><body>2</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.952 0.944</col_3><col_4><body>0.92 0.903</col_4><col_5><body>0.942 0.931</col_5><col_6><body>0.857 0.824</col_6><col_7><body>1.22 2</col_7></row_6>
</table>
<section_header_level_1><location><page_1><loc_22><loc_35><loc_43><loc_36></location>5.2 Quantitative Results</section_header_level_1>
<text><location><page_1><loc_22><loc_22><loc_79><loc_34></location>We picked the model parameter configuration that produced the best prediction quality (enc=6, dec=6, heads=8) with PubTabNet alone, then independently trained and evaluated it on three publicly available data sets: PubTabNet (395k samples), FinTabNet (113k samples) and PubTables-1M (about 1M samples). Performance results are presented in Table. 2. It is clearly evident that the model trained on OTSL outperforms HTML across the board, keeping high TEDs and mAP scores even on difficult financial tables (FinTabNet) that contain sparse and large tables.</text>
<text><location><page_1><loc_22><loc_16><loc_79><loc_22></location>Additionally, the results show that OTSL has an advantage over HTML when applied on a bigger data set like PubTables-1M and achieves significantly improved scores. Finally, OTSL achieves faster inference due to fewer decoding steps which is a result of the reduced sequence representation.</text>
</document>
<doctag><page_header><loc_159><loc_58><loc_366><loc_65>Optimized Table Tokenization for Table Structure Recognition</page_header>
<page_header><loc_389><loc_58><loc_393><loc_65>9</page_header>
<text><loc_110><loc_74><loc_393><loc_97>order to compute the TED score. Inference timing results for all experiments were obtained from the same machine on a single core with AMD EPYC 7763 CPU @2.45 GHz.</text>
<section_header_level_1><loc_110><loc_105><loc_260><loc_113>5.1 Hyper Parameter Optimization</section_header_level_1>
<text><loc_110><loc_116><loc_393><loc_161>We have chosen the PubTabNet data set to perform HPO, since it includes a highly diverse set of tables. Also we report TED scores separately for simple and complex tables (tables with cell spans). Results are presented in Table. 1. It is evident that with OTSL, our model achieves the same TED score and slightly better mAP scores in comparison to HTML. However OTSL yields a 2x speed up in the inference runtime over HTML.</text>
<otsl><loc_114><loc_213><loc_388><loc_296><ched>#<ched>#<ched>Language<ched>TEDs<lcel><lcel><ched>mAP<ched>Inference<nl><ched>enc-layers<ched>dec-layers<ucel><ched>simple<ched>complex<ched>all<ched>(0.75)<ched>time (secs)<nl><fcel>6<fcel>6<fcel>OTSL HTML<fcel>0.965 0.969<fcel>0.934 0.927<fcel>0.955 0.955<fcel>0.88 0.857<fcel>2.73 5.39<nl><fcel>4<fcel>4<fcel>OTSL HTML<fcel>0.938<fcel>0.904<fcel>0.927<fcel>0.853<fcel>1.97<nl><ecel><ecel><fcel>OTSL<fcel>0.952 0.923<fcel>0.909<fcel>0.938<fcel>0.843<fcel>3.77<nl><fcel>2<fcel>4<fcel>HTML<fcel>0.945<fcel>0.897 0.901<fcel>0.915 0.931<fcel>0.859 0.834<fcel>1.91 3.81<nl><fcel>4<fcel>2<fcel>OTSL HTML<fcel>0.952 0.944<fcel>0.92 0.903<fcel>0.942 0.931<fcel>0.857 0.824<fcel>1.22 2<nl><caption><loc_110><loc_172><loc_393><loc_207>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption></otsl>
<section_header_level_1><loc_110><loc_319><loc_216><loc_327>5.2 Quantitative Results</section_header_level_1>
<text><loc_110><loc_330><loc_393><loc_390>We picked the model parameter configuration that produced the best prediction quality (enc=6, dec=6, heads=8) with PubTabNet alone, then independently trained and evaluated it on three publicly available data sets: PubTabNet (395k samples), FinTabNet (113k samples) and PubTables-1M (about 1M samples). Performance results are presented in Table. 2. It is clearly evident that the model trained on OTSL outperforms HTML across the board, keeping high TEDs and mAP scores even on difficult financial tables (FinTabNet) that contain sparse and large tables.</text>
<text><loc_110><loc_390><loc_393><loc_421>Additionally, the results show that OTSL has an advantage over HTML when applied on a bigger data set like PubTables-1M and achieves significantly improved scores. Finally, OTSL achieves faster inference due to fewer decoding steps which is a result of the reduced sequence representation.</text>
</doctag>
@ -1,154 +1,149 @@
<document>
<section_header_level_1><location><page_1><loc_22><loc_82><loc_79><loc_85></location>Optimized Table Tokenization for Table Structure Recognition</section_header_level_1>
<text><location><page_1><loc_23><loc_75><loc_78><loc_79></location>Maksym Lysak [0000 − 0002 − 3723 − $^{6960]}$, Ahmed Nassar[0000 − 0002 − 9468 − $^{0822]}$, Nikolaos Livathinos [0000 − 0001 − 8513 − $^{3491]}$, Christoph Auer[0000 − 0001 − 5761 − $^{0422]}$, [0000 − 0002 − 8088 − 0823]</text>
<text><location><page_1><loc_38><loc_74><loc_49><loc_75></location>and Peter Staar</text>
<text><location><page_1><loc_46><loc_72><loc_55><loc_73></location>IBM Research</text>
<text><location><page_1><loc_36><loc_70><loc_64><loc_71></location>{mly,ahn,nli,cau,taa}@zurich.ibm.com</text>
<text><location><page_1><loc_27><loc_41><loc_74><loc_66></location>Abstract. Extracting tables from documents is a crucial task in any document conversion pipeline. Recently, transformer-based models have demonstrated that table-structure can be recognized with impressive accuracy using Image-to-Markup-Sequence (Im2Seq) approaches. Taking only the image of a table, such models predict a sequence of tokens (e.g. in HTML, LaTeX) which represent the structure of the table. Since the token representation of the table structure has a significant impact on the accuracy and run-time performance of any Im2Seq model, we investigate in this paper how table-structure representation can be optimised. We propose a new, optimised table-structure language (OTSL) with a minimized vocabulary and specific rules. The benefits of OTSL are that it reduces the number of tokens to 5 (HTML needs 28+) and shortens the sequence length to half of HTML on average. Consequently, model accuracy improves significantly, inference time is halved compared to HTML-based models, and the predicted table structures are always syntactically correct. This in turn eliminates most post-processing needs. Popular table structure data-sets will be published in OTSL format to the community.</text>
<text><location><page_1><loc_27><loc_37><loc_74><loc_40></location>Keywords: Table Structure Recognition · Data Representation · Transformers · Optimization.</text>
<section_header_level_1><location><page_1><loc_22><loc_33><loc_37><loc_34></location>1 Introduction</section_header_level_1>
<text><location><page_1><loc_22><loc_21><loc_79><loc_31></location>Tables are ubiquitous in documents such as scientific papers, patents, reports, manuals, specification sheets or marketing material. They often encode highly valuable information and therefore need to be extracted with high accuracy. Unfortunately, tables appear in documents in various sizes, styling and structure, making it difficult to recover their correct structure with simple analytical methods. Therefore, accurate table extraction is achieved these days with machine-learning based methods.</text>
<text><location><page_1><loc_22><loc_16><loc_79><loc_20></location>In modern document understanding systems [1,15], table extraction is typically a two-step process. Firstly, every table on a page is located with a bounding box, and secondly, their logical row and column structure is recognized. As of</text>
<figure>
<location><page_2><loc_24><loc_46><loc_76><loc_74></location>
<caption>Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table-example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key-features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and a enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).</caption>
</figure>
<text><location><page_2><loc_22><loc_34><loc_79><loc_43></location>today, table detection in documents is a well understood problem, and the latest state-of-the-art (SOTA) object detection methods provide an accuracy comparable to human observers [7,8,10,14,23]. On the other hand, the problem of table structure recognition (TSR) is a lot more challenging and remains a very active area of research, in which many novel machine learning algorithms are being explored [3,4,5,9,11,12,13,14,17,18,21,22].</text>
<text><location><page_2><loc_22><loc_16><loc_79><loc_34></location>Recently emerging SOTA methods for table structure recognition employ transformer-based models, in which an image of the table is provided to the network in order to predict the structure of the table as a sequence of tokens. These image-to-sequence (Im2Seq) models are extremely powerful, since they allow for a purely data-driven solution. The tokens of the sequence typically belong to a markup language such as HTML, Latex or Markdown, which allow to describe table structure as rows, columns and spanning cells in various configurations. In Figure 1, we illustrate how HTML is used to represent the table-structure of a particular example table. Public table-structure data sets such as PubTabNet [22], and FinTabNet [21], which were created in a semi-automated way from paired PDF and HTML sources (e.g. PubMed Central), popularized primarily the use of HTML as ground-truth representation format for TSR.</text>
<text><location><page_3><loc_22><loc_73><loc_79><loc_85></location>While the majority of research in TSR is currently focused on the development and application of novel neural model architectures, the table structure representation language (e.g. HTML in PubTabNet and FinTabNet) is usually adopted as is for the sequence tokenization in Im2Seq models. In this paper, we aim for the opposite and investigate the impact of the table structure representation language with an otherwise unmodified Im2Seq transformer-based architecture. Since the current state-of-the-art Im2Seq model is TableFormer [9], we select this model to perform our experiments.</text>
<text><location><page_3><loc_22><loc_58><loc_79><loc_73></location>The main contribution of this paper is the introduction of a new optimised table structure language (OTSL), specifically designed to describe table-structure in an compact and structured way for Im2Seq models. OTSL has a number of key features, which make it very attractive to use in Im2Seq models. Specifically, compared to other languages such as HTML, OTSL has a minimized vocabulary which yields short sequence length, strong inherent structure (e.g. strict rectangular layout) and a strict syntax with rules that only look backwards. The latter allows for syntax validation during inference and ensures a syntactically correct table-structure. These OTSL features are illustrated in Figure 1, in comparison to HTML.</text>
<text><location><page_3><loc_22><loc_45><loc_79><loc_58></location>The paper is structured as follows. In section 2, we give an overview of the latest developments in table-structure reconstruction. In section 3 we review the current HTML table encoding (popularised by PubTabNet and FinTabNet) and discuss its flaws. Subsequently, we introduce OTSL in section 4, which includes the language definition, syntax rules and error-correction procedures. In section 5, we apply OTSL on the TableFormer architecture, compare it to TableFormer models trained on HTML and ultimately demonstrate the advantages of using OTSL. Finally, in section 6 we conclude our work and outline next potential steps.</text>
<section_header_level_1><location><page_3><loc_22><loc_40><loc_39><loc_42></location>2 Related Work</section_header_level_1>
<text><location><page_3><loc_22><loc_16><loc_79><loc_38></location>Approaches to formalize the logical structure and layout of tables in electronic documents date back more than two decades [16]. In the recent past, a wide variety of computer vision methods have been explored to tackle the problem of table structure recognition, i.e. the correct identification of columns, rows and spanning cells in a given table. Broadly speaking, the current deeplearning based approaches fall into three categories: object detection (OD) methods, Graph-Neural-Network (GNN) methods and Image-to-Markup-Sequence (Im2Seq) methods. Object-detection based methods [11,12,13,14,21] rely on tablestructure annotation using (overlapping) bounding boxes for training, and produce bounding-box predictions to define table cells, rows, and columns on a table image. Graph Neural Network (GNN) based methods [3,6,17,18], as the name suggests, represent tables as graph structures. The graph nodes represent the content of each table cell, an embedding vector from the table image, or geometric coordinates of the table cell. The edges of the graph define the relationship between the nodes, e.g. if they belong to the same column, row, or table cell.</text>
<text><location><page_4><loc_22><loc_67><loc_79><loc_85></location>Other work [20] aims at predicting a grid for each table and deciding which cells must be merged using an attention network. Im2Seq methods cast the problem as a sequence generation task [4,5,9,22], and therefore need an internal tablestructure representation language, which is often implemented with standard markup languages (e.g. HTML, LaTeX, Markdown). In theory, Im2Seq methods have a natural advantage over the OD and GNN methods by virtue of directly predicting the table-structure. As such, no post-processing or rules are needed in order to obtain the table-structure, which is necessary with OD and GNN approaches. In practice, this is not entirely true, because a predicted sequence of table-structure markup does not necessarily have to be syntactically correct. Hence, depending on the quality of the predicted sequence, some post-processing needs to be performed to ensure a syntactically valid (let alone correct) sequence.</text>
<text><location><page_4><loc_22><loc_39><loc_79><loc_67></location>Within the Im2Seq method, we find several popular models, namely the encoder-dual-decoder model (EDD) [22], TableFormer [9], Tabsplitter[2] and Ye et. al. [19]. EDD uses two consecutive long short-term memory (LSTM) decoders to predict a table in HTML representation. The tag decoder predicts a sequence of HTML tags. For each decoded table cell ( <td> ), the attention is passed to the cell decoder to predict the content with an embedded OCR approach. The latter makes it susceptible to transcription errors in the cell content of the table. TableFormer address this reliance on OCR and uses two transformer decoders for HTML structure and cell bounding box prediction in an end-to-end architecture. The predicted cell bounding box is then used to extract text tokens from an originating (digital) PDF page, circumventing any need for OCR. TabSplitter [2] proposes a compact double-matrix representation of table rows and columns to do error detection and error correction of HTML structure sequences based on predictions from [19]. This compact double-matrix representation can not be used directly by the Img2seq model training, so the model uses HTML as an intermediate form. Chi et. al. [4] introduce a data set and a baseline method using bidirectional LSTMs to predict LaTeX code. Kayal [5] introduces Gated ResNet transformers to predict LaTeX code, and a separate OCR module to extract content.</text>
<text><location><page_4><loc_22><loc_26><loc_79><loc_38></location>Im2Seq approaches have shown to be well-suited for the TSR task and allow a full end-to-end network design that can output the final table structure without pre- or post-processing logic. Furthermore, Im2Seq models have demonstrated to deliver state-of-the-art prediction accuracy [9]. This motivated the authors to investigate if the performance (both in accuracy and inference time) can be further improved by optimising the table structure representation language. We believe this is a necessary step before further improving neural network architectures for this task.</text>
<section_header_level_1><location><page_4><loc_22><loc_22><loc_44><loc_24></location>3 Problem Statement</section_header_level_1>
<text><location><page_4><loc_22><loc_16><loc_79><loc_20></location>All known Im2Seq based models for TSR fundamentally work in similar ways. Given an image of a table, the Im2Seq model predicts the structure of the table by generating a sequence of tokens. These tokens originate from a finite vocab-</text>
<text><location><page_5><loc_22><loc_76><loc_79><loc_85></location>ulary and can be interpreted as a table structure. For example, with the HTML tokens <table> , </table> , <tr> , </tr> , <td> and </td> , one can construct simple table structures without any spanning cells. In reality though, one needs at least 28 HTML tokens to describe the most common complex tables observed in real-world documents [21,22], due to a variety of spanning cells definitions in the HTML token vocabulary.</text>
<figure>
<location><page_5><loc_22><loc_57><loc_78><loc_71></location>
<caption>Fig. 2. Frequency of tokens in HTML and OTSL as they appear in PubTabNet.</caption>
</figure>
<text><location><page_5><loc_22><loc_33><loc_79><loc_54></location>Obviously, HTML and other general-purpose markup languages were not designed for Im2Seq models. As such, they have some serious drawbacks. First, the token vocabulary needs to be artificially large in order to describe all plausible tabular structures. Since most Im2Seq models use an autoregressive approach, they generate the sequence token by token. Therefore, to reduce inference time, a shorter sequence length is critical. Every table-cell is represented by at least two tokens ( <td> and </td> ). Furthermore, when tokenizing the HTML structure, one needs to explicitly enumerate possible column-spans and row-spans as words. In practice, this ends up requiring 28 different HTML tokens (when including column- and row-spans up to 10 cells) just to describe every table in the PubTabNet dataset. Clearly, not every token is equally represented, as is depicted in Figure 2. This skewed distribution of tokens in combination with variable token row-length makes it challenging for models to learn the HTML structure.</text>
<text><location><page_5><loc_22><loc_27><loc_79><loc_32></location>Additionally, it would be desirable if the representation would easily allow an early detection of invalid sequences on-the-go, before the prediction of the entire table structure is completed. HTML is not well-suited for this purpose as the verification of incomplete sequences is non-trivial or even impossible.</text>
<text><location><page_5><loc_22><loc_16><loc_79><loc_26></location>In a valid HTML table, the token sequence must describe a 2D grid of table cells, serialised in row-major ordering, where each row and each column have the same length (while considering row- and column-spans). Furthermore, every opening tag in HTML needs to be matched by a closing tag in a correct hierarchical manner. Since the number of tokens for each table row and column can vary significantly, especially for large tables with many row- and column-spans, it is complex to verify the consistency of predicted structures during sequence</text>
|
||||
<text><location><page_6><loc_22><loc_82><loc_79><loc_85></location>generation. Implicitly, this also means that Im2Seq models need to learn these complex syntax rules, simply to deliver valid output.</text>
|
||||
<text><location><page_6><loc_22><loc_63><loc_79><loc_82></location>In practice, we observe two major issues with prediction quality when training Im2Seq models on HTML table structure generation from images. On the one hand, we find that on large tables, the visual attention of the model often starts to drift and is not accurately moving forward cell by cell anymore. This manifests itself either in an increasing location drift for proposed table cells in later rows of the same column, or even in a complete loss of vertical alignment, as illustrated in Figure 5. Addressing this with post-processing is partially possible, but clearly undesired. On the other hand, we find many instances of predictions with structural inconsistencies or plain invalid HTML output, as shown in Figure 6, which are nearly impossible to properly correct. Both problems seriously impact the TSR model performance, since they affect not only the task of pure structure recognition but also the equally crucial recognition or matching of table cell content.</text>
|
||||
<section_header_level_1><location><page_6><loc_22><loc_58><loc_61><loc_60></location>4 Optimised Table Structure Language</section_header_level_1>
|
||||
<text><location><page_6><loc_22><loc_44><loc_79><loc_56></location>To mitigate the issues with HTML in Im2Seq-based TSR models laid out before, we propose here our Optimised Table Structure Language (OTSL). OTSL is designed to express table structure with a minimized vocabulary and a simple set of rules, which are both significantly reduced compared to HTML. At the same time, OTSL enables easy error detection and correction during sequence generation. We further demonstrate how the compact structure representation and minimized sequence length improves prediction accuracy and inference time in the TableFormer architecture.</text>
|
||||
<section_header_level_1><location><page_6><loc_22><loc_40><loc_43><loc_41></location>4.1 Language Definition</section_header_level_1>
|
||||
<text><location><page_6><loc_22><loc_34><loc_79><loc_38></location>In Figure 3, we illustrate how the OTSL is defined. In essence, the OTSL defines only 5 tokens that directly describe a tabular structure based on an atomic 2D grid.</text>
|
||||
<text><location><page_6><loc_24><loc_33><loc_67><loc_34></location>The OTSL vocabulary is comprised of the following tokens:</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_6><loc_23><loc_30><loc_75><loc_31></location>-"C" cell a new table cell that either has or does not have cell content</list_item>
|
||||
<list_item><location><page_6><loc_23><loc_27><loc_79><loc_29></location>-"L" cell left-looking cell , merging with the left neighbor cell to create a span</list_item>
|
||||
<list_item><location><page_6><loc_23><loc_24><loc_79><loc_26></location>-"U" cell up-looking cell , merging with the upper neighbor cell to create a span</list_item>
|
||||
<list_item><location><page_6><loc_23><loc_22><loc_74><loc_23></location>-"X" cell cross cell , to merge with both left and upper neighbor cells</list_item>
|
||||
<list_item><location><page_6><loc_23><loc_20><loc_54><loc_21></location>-"NL" new-line , switch to the next row.</list_item>
|
||||
<doctag><page_header><loc_15><loc_132><loc_30><loc_350>arXiv:2305.03393v1 [cs.CV] 5 May 2023</page_header>
|
||||
<section_header_level_1><loc_110><loc_73><loc_393><loc_92>Optimized Table Tokenization for Table Structure Recognition</section_header_level_1>
|
||||
<text><loc_114><loc_107><loc_389><loc_126>Maksym Lysak [0000-0002-3723-6960], Ahmed Nassar [0000-0002-9468-0822], Nikolaos Livathinos [0000-0001-8513-3491], Christoph Auer [0000-0001-5761-0422], [0000-0002-8088-0823]</text>
|
||||
<text><loc_188><loc_123><loc_244><loc_129>and Peter Staar</text>
|
||||
<text><loc_228><loc_137><loc_275><loc_142>IBM Research</text>
|
||||
<text><loc_182><loc_144><loc_321><loc_149>{mly,ahn,nli,cau,taa}@zurich.ibm.com</text>
|
||||
<text><loc_133><loc_171><loc_369><loc_293>Abstract. Extracting tables from documents is a crucial task in any document conversion pipeline. Recently, transformer-based models have demonstrated that table-structure can be recognized with impressive accuracy using Image-to-Markup-Sequence (Im2Seq) approaches. Taking only the image of a table, such models predict a sequence of tokens (e.g. in HTML, LaTeX) which represent the structure of the table. Since the token representation of the table structure has a significant impact on the accuracy and run-time performance of any Im2Seq model, we investigate in this paper how table-structure representation can be optimised. We propose a new, optimised table-structure language (OTSL) with a minimized vocabulary and specific rules. The benefits of OTSL are that it reduces the number of tokens to 5 (HTML needs 28+) and shortens the sequence length to half of HTML on average. Consequently, model accuracy improves significantly, inference time is halved compared to HTML-based models, and the predicted table structures are always syntactically correct. This in turn eliminates most post-processing needs. Popular table structure data-sets will be published in OTSL format to the community.</text>
|
||||
<text><loc_133><loc_302><loc_369><loc_314>Keywords: Table Structure Recognition · Data Representation · Transformers · Optimization.</text>
|
||||
<section_header_level_1><loc_110><loc_330><loc_187><loc_336>1 Introduction</section_header_level_1>
|
||||
<text><loc_110><loc_346><loc_393><loc_397>Tables are ubiquitous in documents such as scientific papers, patents, reports, manuals, specification sheets or marketing material. They often encode highly valuable information and therefore need to be extracted with high accuracy. Unfortunately, tables appear in documents in various sizes, styling and structure, making it difficult to recover their correct structure with simple analytical methods. Therefore, accurate table extraction is achieved these days with machine-learning based methods.</text>
|
||||
<text><loc_110><loc_399><loc_393><loc_420>In modern document understanding systems [1,15], table extraction is typically a two-step process. Firstly, every table on a page is located with a bounding box, and secondly, its logical row and column structure is recognized. As of</text>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_114><loc_64>2</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<picture><loc_121><loc_132><loc_379><loc_269><caption><loc_110><loc_80><loc_393><loc_126>Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and an enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).</caption></picture>
|
||||
<text><loc_110><loc_286><loc_393><loc_329>today, table detection in documents is a well understood problem, and the latest state-of-the-art (SOTA) object detection methods provide an accuracy comparable to human observers [7,8,10,14,23]. On the other hand, the problem of table structure recognition (TSR) is a lot more challenging and remains a very active area of research, in which many novel machine learning algorithms are being explored [3,4,5,9,11,12,13,14,17,18,21,22].</text>
|
||||
<text><loc_110><loc_331><loc_393><loc_420>Recently emerging SOTA methods for table structure recognition employ transformer-based models, in which an image of the table is provided to the network in order to predict the structure of the table as a sequence of tokens. These image-to-sequence (Im2Seq) models are extremely powerful, since they allow for a purely data-driven solution. The tokens of the sequence typically belong to a markup language such as HTML, LaTeX or Markdown, which allow table structure to be described as rows, columns and spanning cells in various configurations. In Figure 1, we illustrate how HTML is used to represent the table-structure of a particular example table. Public table-structure data sets such as PubTabNet [22] and FinTabNet [21], which were created in a semi-automated way from paired PDF and HTML sources (e.g. PubMed Central), popularized primarily the use of HTML as ground-truth representation format for TSR.</text>
|
||||
<page_break>
|
||||
<page_header><loc_159><loc_59><loc_366><loc_64>Optimized Table Tokenization for Table Structure Recognition</page_header>
|
||||
<page_header><loc_389><loc_59><loc_393><loc_64>3</page_header>
|
||||
<text><loc_110><loc_75><loc_393><loc_133>While the majority of research in TSR is currently focused on the development and application of novel neural model architectures, the table structure representation language (e.g. HTML in PubTabNet and FinTabNet) is usually adopted as is for the sequence tokenization in Im2Seq models. In this paper, we aim for the opposite and investigate the impact of the table structure representation language with an otherwise unmodified Im2Seq transformer-based architecture. Since the current state-of-the-art Im2Seq model is TableFormer [9], we select this model to perform our experiments.</text>
|
||||
<text><loc_110><loc_136><loc_393><loc_209>The main contribution of this paper is the introduction of a new optimised table structure language (OTSL), specifically designed to describe table-structure in a compact and structured way for Im2Seq models. OTSL has a number of key features, which make it very attractive to use in Im2Seq models. Specifically, compared to other languages such as HTML, OTSL has a minimized vocabulary which yields a short sequence length, strong inherent structure (e.g. strict rectangular layout) and a strict syntax with rules that only look backwards. The latter allows for syntax validation during inference and ensures a syntactically correct table-structure. These OTSL features are illustrated in Figure 1, in comparison to HTML.</text>
|
||||
<text><loc_110><loc_211><loc_393><loc_277>The paper is structured as follows. In section 2, we give an overview of the latest developments in table-structure reconstruction. In section 3 we review the current HTML table encoding (popularised by PubTabNet and FinTabNet) and discuss its flaws. Subsequently, we introduce OTSL in section 4, which includes the language definition, syntax rules and error-correction procedures. In section 5, we apply OTSL on the TableFormer architecture, compare it to TableFormer models trained on HTML and ultimately demonstrate the advantages of using OTSL. Finally, in section 6 we conclude our work and outline next potential steps.</text>
|
||||
<section_header_level_1><loc_110><loc_292><loc_193><loc_298>2 Related Work</section_header_level_1>
|
||||
<text><loc_110><loc_309><loc_396><loc_420>Approaches to formalize the logical structure and layout of tables in electronic documents date back more than two decades [16]. In the recent past, a wide variety of computer vision methods have been explored to tackle the problem of table structure recognition, i.e. the correct identification of columns, rows and spanning cells in a given table. Broadly speaking, the current deep-learning based approaches fall into three categories: object detection (OD) methods, Graph-Neural-Network (GNN) methods and Image-to-Markup-Sequence (Im2Seq) methods. Object-detection based methods [11,12,13,14,21] rely on table-structure annotation using (overlapping) bounding boxes for training, and produce bounding-box predictions to define table cells, rows, and columns on a table image. Graph Neural Network (GNN) based methods [3,6,17,18], as the name suggests, represent tables as graph structures. The graph nodes represent the content of each table cell, an embedding vector from the table image, or geometric coordinates of the table cell. The edges of the graph define the relationship between the nodes, e.g. if they belong to the same column, row, or table cell.</text>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_114><loc_64>4</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<text><loc_110><loc_75><loc_393><loc_164>Other work [20] aims at predicting a grid for each table and deciding which cells must be merged using an attention network. Im2Seq methods cast the problem as a sequence generation task [4,5,9,22], and therefore need an internal table-structure representation language, which is often implemented with standard markup languages (e.g. HTML, LaTeX, Markdown). In theory, Im2Seq methods have a natural advantage over the OD and GNN methods by virtue of directly predicting the table-structure. As such, no post-processing or rules are needed in order to obtain the table-structure, which is necessary with OD and GNN approaches. In practice, this is not entirely true, because a predicted sequence of table-structure markup does not necessarily have to be syntactically correct. Hence, depending on the quality of the predicted sequence, some post-processing needs to be performed to ensure a syntactically valid (let alone correct) sequence.</text>
|
||||
<text><loc_110><loc_166><loc_393><loc_307>Within the Im2Seq method, we find several popular models, namely the encoder-dual-decoder model (EDD) [22], TableFormer [9], TabSplitter [2] and Ye et al. [19]. EDD uses two consecutive long short-term memory (LSTM) decoders to predict a table in HTML representation. The tag decoder predicts a sequence of HTML tags. For each decoded table cell ( <td> ), the attention is passed to the cell decoder to predict the content with an embedded OCR approach. The latter makes it susceptible to transcription errors in the cell content of the table. TableFormer addresses this reliance on OCR and uses two transformer decoders for HTML structure and cell bounding box prediction in an end-to-end architecture. The predicted cell bounding box is then used to extract text tokens from an originating (digital) PDF page, circumventing any need for OCR. TabSplitter [2] proposes a compact double-matrix representation of table rows and columns to do error detection and error correction of HTML structure sequences based on predictions from [19]. This compact double-matrix representation cannot be used directly for Im2Seq model training, so the model uses HTML as an intermediate form. Chi et al. [4] introduce a data set and a baseline method using bidirectional LSTMs to predict LaTeX code. Kayal [5] introduces Gated ResNet transformers to predict LaTeX code, and a separate OCR module to extract content.</text>
|
||||
<text><loc_110><loc_309><loc_393><loc_368>Im2Seq approaches have been shown to be well-suited for the TSR task and allow a full end-to-end network design that can output the final table structure without pre- or post-processing logic. Furthermore, Im2Seq models have been demonstrated to deliver state-of-the-art prediction accuracy [9]. This motivated the authors to investigate whether the performance (both in accuracy and inference time) can be further improved by optimising the table structure representation language. We believe this is a necessary step before further improving neural network architectures for this task.</text>
|
||||
<section_header_level_1><loc_110><loc_382><loc_220><loc_389>3 Problem Statement</section_header_level_1>
|
||||
<text><loc_110><loc_399><loc_393><loc_420>All known Im2Seq based models for TSR fundamentally work in similar ways. Given an image of a table, the Im2Seq model predicts the structure of the table by generating a sequence of tokens. These tokens originate from a finite vocab-</text>
|
||||
<page_break>
|
||||
<page_header><loc_159><loc_59><loc_366><loc_64>Optimized Table Tokenization for Table Structure Recognition</page_header>
|
||||
<page_header><loc_389><loc_59><loc_393><loc_64>5</page_header>
|
||||
<text><loc_110><loc_75><loc_393><loc_118>ulary and can be interpreted as a table structure. For example, with the HTML tokens <table> , </table> , <tr> , </tr> , <td> and </td> , one can construct simple table structures without any spanning cells. In reality though, one needs at least 28 HTML tokens to describe the most common complex tables observed in real-world documents [21,22], due to a variety of spanning cells definitions in the HTML token vocabulary.</text>
|
||||
<picture><loc_112><loc_147><loc_389><loc_215><caption><loc_119><loc_140><loc_384><loc_145>Fig. 2. Frequency of tokens in HTML and OTSL as they appear in PubTabNet.</caption></picture>
|
||||
<text><loc_110><loc_232><loc_393><loc_336>Obviously, HTML and other general-purpose markup languages were not designed for Im2Seq models. As such, they have some serious drawbacks. First, the token vocabulary needs to be artificially large in order to describe all plausible tabular structures. Since most Im2Seq models use an autoregressive approach, they generate the sequence token by token. Therefore, to reduce inference time, a shorter sequence length is critical. Every table-cell is represented by at least two tokens ( <td> and </td> ). Furthermore, when tokenizing the HTML structure, one needs to explicitly enumerate possible column-spans and row-spans as words. In practice, this ends up requiring 28 different HTML tokens (when including column- and row-spans up to 10 cells) just to describe every table in the PubTabNet dataset. Clearly, not every token is equally represented, as is depicted in Figure 2. This skewed distribution of tokens in combination with variable token row-length makes it challenging for models to learn the HTML structure.</text>
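To make the sequence-length argument above concrete, the following small Python snippet (illustrative only, not taken from the paper or from the docling code base) counts the structure tokens needed for a plain 2x2 table without spans in both representations; the HTML side already needs more than twice as many tokens before any spanning cells are involved:

```python
# Illustrative token count for a plain 2x2 table without spanning cells.

# HTML structure tokens: <table>, then per row <tr> ... </tr> with <td></td> per cell.
html_tokens = (
    ["<table>"]
    + 2 * (["<tr>"] + 2 * ["<td>", "</td>"] + ["</tr>"])
    + ["</table>"]
)

# OTSL structure tokens: one "C" per cell and one "NL" per row.
otsl_tokens = 2 * ["C", "C", "NL"]

print(len(html_tokens), len(otsl_tokens))  # 14 6
```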
|
||||
<text><loc_110><loc_338><loc_393><loc_367>Additionally, it would be desirable if the representation would easily allow an early detection of invalid sequences on-the-go, before the prediction of the entire table structure is completed. HTML is not well-suited for this purpose as the verification of incomplete sequences is non-trivial or even impossible.</text>
|
||||
<text><loc_110><loc_369><loc_393><loc_420>In a valid HTML table, the token sequence must describe a 2D grid of table cells, serialised in row-major ordering, where each row and each column have the same length (while considering row- and column-spans). Furthermore, every opening tag in HTML needs to be matched by a closing tag in a correct hierarchical manner. Since the number of tokens for each table row and column can vary significantly, especially for large tables with many row- and column-spans, it is complex to verify the consistency of predicted structures during sequence</text>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_114><loc_64>6</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<text><loc_110><loc_75><loc_393><loc_88>generation. Implicitly, this also means that Im2Seq models need to learn these complex syntax rules, simply to deliver valid output.</text>
|
||||
<text><loc_110><loc_91><loc_393><loc_187>In practice, we observe two major issues with prediction quality when training Im2Seq models on HTML table structure generation from images. On the one hand, we find that on large tables, the visual attention of the model often starts to drift and is not accurately moving forward cell by cell anymore. This manifests itself either in an increasing location drift for proposed table cells in later rows of the same column, or even in a complete loss of vertical alignment, as illustrated in Figure 5. Addressing this with post-processing is partially possible, but clearly undesired. On the other hand, we find many instances of predictions with structural inconsistencies or plain invalid HTML output, as shown in Figure 6, which are nearly impossible to properly correct. Both problems seriously impact the TSR model performance, since they affect not only the task of pure structure recognition but also the equally crucial recognition or matching of table cell content.</text>
|
||||
<section_header_level_1><loc_110><loc_202><loc_304><loc_209>4 Optimised Table Structure Language</section_header_level_1>
|
||||
<text><loc_110><loc_220><loc_393><loc_279>To mitigate the issues with HTML in Im2Seq-based TSR models laid out before, we propose here our Optimised Table Structure Language (OTSL). OTSL is designed to express table structure with a minimized vocabulary and a simple set of rules, which are both significantly reduced compared to HTML. At the same time, OTSL enables easy error detection and correction during sequence generation. We further demonstrate how the compact structure representation and minimized sequence length improves prediction accuracy and inference time in the TableFormer architecture.</text>
|
||||
<section_header_level_1><loc_110><loc_294><loc_214><loc_300>4.1 Language Definition</section_header_level_1>
|
||||
<text><loc_110><loc_309><loc_393><loc_329>In Figure 3, we illustrate how the OTSL is defined. In essence, the OTSL defines only 5 tokens that directly describe a tabular structure based on an atomic 2D grid.</text>
|
||||
<text><loc_122><loc_332><loc_334><loc_337>The OTSL vocabulary is comprised of the following tokens:</text>
|
||||
<unordered_list><list_item><loc_115><loc_346><loc_376><loc_352>-"C" cell a new table cell that either has or does not have cell content</list_item>
|
||||
<list_item><loc_115><loc_354><loc_393><loc_367>-"L" cell left-looking cell , merging with the left neighbor cell to create a span</list_item>
|
||||
<list_item><loc_115><loc_369><loc_393><loc_382>-"U" cell up-looking cell , merging with the upper neighbor cell to create a span</list_item>
|
||||
<list_item><loc_115><loc_385><loc_371><loc_390>-"X" cell cross cell , to merge with both left and upper neighbor cells</list_item>
|
||||
<list_item><loc_115><loc_393><loc_268><loc_398>-"NL" new-line , switch to the next row.</list_item>
|
||||
</unordered_list>
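The five tokens listed above can be read as instructions that fill an atomic 2D grid row by row. The following minimal Python sketch (a hypothetical helper, not code from the paper or from docling) lays an OTSL token sequence out on such a grid, which is the picture used throughout the rest of this section:

```python
OTSL_TOKENS = {"C", "L", "U", "X", "NL"}

def otsl_to_grid(tokens):
    """Lay an OTSL token sequence out on a 2D grid, row by row."""
    grid, row = [], []
    for tok in tokens:
        assert tok in OTSL_TOKENS, f"unknown token: {tok}"
        if tok == "NL":       # new-line: close the current row
            grid.append(row)
            row = []
        else:                 # "C", "L", "U" and "X" each occupy one grid position
            row.append(tok)
    return grid

# A 2x3 table whose first row is a single cell spanning all three columns.
print(otsl_to_grid(["C", "L", "L", "NL", "C", "C", "C", "NL"]))
# [['C', 'L', 'L'], ['C', 'C', 'C']]
```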
|
||||
<text><location><page_6><loc_22><loc_16><loc_79><loc_19></location>A notable attribute of OTSL is that it has the capability of achieving lossless conversion to HTML.</text>
|
||||
<figure>
|
||||
<location><page_7><loc_27><loc_65><loc_73><loc_79></location>
|
||||
<caption>Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding</caption>
|
||||
</figure>
|
||||
<section_header_level_1><location><page_7><loc_22><loc_60><loc_40><loc_61></location>4.2 Language Syntax</section_header_level_1>
|
||||
<text><location><page_7><loc_22><loc_58><loc_59><loc_59></location>The OTSL representation follows these syntax rules:</text>
|
||||
<unordered_list>
|
||||
<list_item><location><page_7><loc_23><loc_54><loc_79><loc_56></location>1. Left-looking cell rule : The left neighbour of an "L" cell must be either another "L" cell or a "C" cell.</list_item>
|
||||
<list_item><location><page_7><loc_23><loc_51><loc_79><loc_53></location>2. Up-looking cell rule : The upper neighbour of a "U" cell must be either another "U" cell or a "C" cell.</list_item>
|
||||
<text><loc_110><loc_407><loc_393><loc_420>A notable attribute of OTSL is that it has the capability of achieving lossless conversion to HTML.</text>
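As a rough illustration of why such a lossless mapping is possible, the sketch below (a simplified assumption about one possible conversion, covering structure only and not cell content; otsl_grid_to_html is a hypothetical name, not the authors' implementation) emits one <td> per "C" cell and derives its colspan and rowspan from the "L" and "U" cells that point back to it:

```python
def otsl_grid_to_html(grid):
    """Convert a valid OTSL grid (rows of C/L/U/X tokens) into HTML structure markup."""
    html = ["<table>"]
    for r, row in enumerate(grid):
        html.append("<tr>")
        for c, tok in enumerate(row):
            if tok != "C":
                continue  # "L", "U" and "X" cells are covered by the span of some "C" cell
            colspan = 1
            while c + colspan < len(row) and row[c + colspan] == "L":
                colspan += 1
            rowspan = 1
            while r + rowspan < len(grid) and grid[r + rowspan][c] == "U":
                rowspan += 1
            attrs = ""
            if colspan > 1:
                attrs += f' colspan="{colspan}"'
            if rowspan > 1:
                attrs += f' rowspan="{rowspan}"'
            html.append(f"<td{attrs}></td>")
        html.append("</tr>")
    html.append("</table>")
    return "".join(html)

print(otsl_grid_to_html([["C", "L"], ["C", "C"]]))
# <table><tr><td colspan="2"></td></tr><tr><td></td><td></td></tr></table>
```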
|
||||
<page_break>
|
||||
<page_header><loc_159><loc_59><loc_366><loc_64>Optimized Table Tokenization for Table Structure Recognition</page_header>
|
||||
<page_header><loc_389><loc_59><loc_393><loc_64>7</page_header>
|
||||
<picture><loc_135><loc_103><loc_367><loc_177><caption><loc_110><loc_79><loc_393><loc_98>Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding</caption></picture>
|
||||
<section_header_level_1><loc_110><loc_193><loc_202><loc_198>4.2 Language Syntax</section_header_level_1>
|
||||
<text><loc_110><loc_205><loc_297><loc_211>The OTSL representation follows these syntax rules:</text>
|
||||
<unordered_list><list_item><loc_114><loc_219><loc_393><loc_232>1. Left-looking cell rule : The left neighbour of an "L" cell must be either another "L" cell or a "C" cell.</list_item>
|
||||
<list_item><loc_114><loc_234><loc_393><loc_247>2. Up-looking cell rule : The upper neighbour of a "U" cell must be either another "U" cell or a "C" cell.</list_item>
|
||||
</unordered_list>
|
||||
<section_header_level_1><location><page_7><loc_23><loc_49><loc_37><loc_50></location>3. Cross cell rule :</section_header_level_1>
|
||||
<unordered_list>
|
||||
<list_item><location><page_7><loc_25><loc_44><loc_79><loc_49></location>The left neighbour of an "X" cell must be either another "X" cell or a "U" cell, and the upper neighbour of an "X" cell must be either another "X" cell or an "L" cell.</list_item>
|
||||
<list_item><location><page_7><loc_23><loc_43><loc_78><loc_44></location>4. First row rule : Only "L" cells and "C" cells are allowed in the first row.</list_item>
|
||||
<list_item><location><page_7><loc_23><loc_40><loc_79><loc_43></location>5. First column rule : Only "U" cells and "C" cells are allowed in the first column.</list_item>
|
||||
<list_item><location><page_7><loc_23><loc_37><loc_79><loc_40></location>6. Rectangular rule : The table representation is always rectangular - all rows must have an equal number of tokens, terminated with "NL" token.</list_item>
|
||||
<section_header_level_1><loc_114><loc_249><loc_185><loc_255>3. Cross cell rule :</section_header_level_1>
|
||||
<unordered_list><list_item><loc_124><loc_257><loc_393><loc_278>The left neighbour of an "X" cell must be either another "X" cell or a "U" cell, and the upper neighbour of an "X" cell must be either another "X" cell or an "L" cell.</list_item>
|
||||
<list_item><loc_114><loc_280><loc_388><loc_285>4. First row rule : Only "L" cells and "C" cells are allowed in the first row.</list_item>
|
||||
<list_item><loc_114><loc_287><loc_393><loc_300>5. First column rule : Only "U" cells and "C" cells are allowed in the first column.</list_item>
|
||||
<list_item><loc_114><loc_302><loc_393><loc_315>6. Rectangular rule : The table representation is always rectangular - all rows must have an equal number of tokens, terminated with "NL" token.</list_item>
|
||||
</unordered_list>
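Because every rule above only refers to the already-emitted left and upper neighbours, a partially generated sequence can be checked token by token. The sketch below (an illustrative assumption, not the reference implementation; next_token_is_valid is a hypothetical name) validates a candidate token against the left-looking, up-looking, cross-cell and rectangular rules directly; the first-row and first-column rules follow implicitly because the upper neighbour is undefined in the first row and the left neighbour is undefined in the first column:

```python
def next_token_is_valid(rows, current_row, token):
    """Backward-looking check of a candidate OTSL token against the syntax rules.

    rows        -- completed rows (lists of "C"/"L"/"U"/"X" tokens)
    current_row -- tokens emitted so far in the row under construction
    token       -- candidate token: "C", "L", "U", "X" or "NL"
    """
    col = len(current_row)
    left = current_row[-1] if current_row else None
    up = rows[-1][col] if rows and col < len(rows[-1]) else None

    if token == "NL":
        # Rectangular rule: every row must match the length of the first row.
        return col > 0 and (not rows or col == len(rows[0]))
    if rows and col >= len(rows[0]):
        return False  # the row would become longer than the first row
    if token == "C":
        return True                                      # allowed anywhere
    if token == "L":
        return left in ("L", "C")                        # left-looking cell rule
    if token == "U":
        return up in ("U", "C")                          # up-looking cell rule
    if token == "X":
        return left in ("X", "U") and up in ("X", "L")   # cross cell rule
    return False                                         # unknown token

print(next_token_is_valid([], ["C"], "U"))            # False: no "U" in the first row
print(next_token_is_valid([["C", "C"]], ["C"], "U"))  # True: upper neighbour is a "C" cell
```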
|
||||
<text><location><page_7><loc_22><loc_19><loc_79><loc_35></location>The application of these rules gives OTSL a set of unique properties. First of all, the OTSL enforces a strictly rectangular structure representation, where every new-line token starts a new row. As a consequence, all rows and all columns have exactly the same number of tokens, irrespective of cell spans. Secondly, the OTSL representation is unambiguous: Every table structure is represented in one way. In this representation every table cell corresponds to a "C"-cell token, which in case of spans is always located in the top-left corner of the table cell definition. Third, OTSL syntax rules are only backward-looking. As a consequence, every predicted token can be validated straight during sequence generation by looking at the previously predicted sequence. As such, OTSL can guarantee that every predicted sequence is syntactically valid.</text>
|
||||
<text><location><page_7><loc_22><loc_16><loc_79><loc_19></location>These characteristics can be easily learned by sequence generator networks, as we demonstrate further below. We find strong indications that this pattern</text>
|
||||
<text><location><page_8><loc_22><loc_82><loc_79><loc_85></location>reduces significantly the column drift seen in the HTML based models (see Figure 5).</text>
|
||||
<section_header_level_1><location><page_8><loc_22><loc_78><loc_52><loc_80></location>4.3 Error-detection and -mitigation</section_header_level_1>
|
||||
<text><location><page_8><loc_22><loc_62><loc_79><loc_77></location>The design of OTSL makes it easy to validate a table structure on an unfinished sequence. The detection of an invalid sequence token is a clear indication of a prediction mistake; however, a valid sequence by itself does not guarantee prediction correctness. Different heuristics can be used to correct token errors in an invalid sequence and thus increase the chances for accurate predictions. Such heuristics can be applied either after the prediction of each token, or at the end, on the entire predicted sequence. For example, a simple heuristic which can correct the predicted OTSL sequence on-the-fly is to verify whether the token with the highest prediction confidence invalidates the predicted sequence, and if so, replace it with the token with the next highest confidence until the OTSL rules are satisfied.</text>
|
||||
<section_header_level_1><location><page_8><loc_22><loc_58><loc_37><loc_59></location>5 Experiments</section_header_level_1>
|
||||
<text><location><page_8><loc_22><loc_43><loc_79><loc_56></location>To evaluate the impact of OTSL on prediction accuracy and inference times, we conducted a series of experiments based on the TableFormer model (Figure 4) with two objectives: Firstly we evaluate the prediction quality and performance of OTSL vs. HTML after performing Hyper Parameter Optimization (HPO) on the canonical PubTabNet data set. Secondly we pick the best hyper-parameters found in the first step and evaluate how OTSL impacts the performance of TableFormer after training on other publicly available data sets (FinTabNet, PubTables-1M [14]). The ground truth (GT) from all data sets has been converted into OTSL format for this purpose, and will be made publicly available.</text>
|
||||
<figure>
|
||||
<location><page_8><loc_23><loc_25><loc_77><loc_36></location>
|
||||
<caption>Fig. 4. Architecture sketch of the TableFormer model, which is a representative for the Im2Seq approach.</caption>
|
||||
</figure>
|
||||
<text><location><page_8><loc_22><loc_16><loc_79><loc_22></location>We rely on standard metrics such as Tree Edit Distance score (TEDs) for table structure prediction, and Mean Average Precision (mAP) with 0.75 Intersection Over Union (IOU) threshold for the bounding-box predictions of table cells. The predicted OTSL structures were converted back to HTML format in</text>
|
||||
<text><location><page_9><loc_22><loc_81><loc_79><loc_85></location>order to compute the TED score. Inference timing results for all experiments were obtained from the same machine on a single core with AMD EPYC 7763 CPU @2.45 GHz.</text>
|
||||
<section_header_level_1><location><page_9><loc_22><loc_78><loc_52><loc_79></location>5.1 Hyper Parameter Optimization</section_header_level_1>
|
||||
<text><location><page_9><loc_22><loc_68><loc_79><loc_77></location>We have chosen the PubTabNet data set to perform HPO, since it includes a highly diverse set of tables. We also report TED scores separately for simple and complex tables (tables with cell spans). Results are presented in Table 1. It is evident that with OTSL, our model achieves the same TED score and slightly better mAP scores in comparison to HTML. However, OTSL yields a 2x speed-up in inference runtime over HTML.</text>
|
||||
<table>
|
||||
<location><page_9><loc_23><loc_41><loc_78><loc_57></location>
|
||||
<caption>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption>
|
||||
<row_0><col_0><col_header>#</col_0><col_1><col_header>#</col_1><col_2><col_header>Language</col_2><col_3><col_header>TEDs</col_3><col_4><col_header>TEDs</col_4><col_5><col_header>TEDs</col_5><col_6><col_header>mAP</col_6><col_7><col_header>Inference</col_7></row_0>
|
||||
<row_1><col_0><col_header>enc-layers</col_0><col_1><col_header>dec-layers</col_1><col_2><col_header>Language</col_2><col_3><col_header>simple</col_3><col_4><col_header>complex</col_4><col_5><col_header>all</col_5><col_6><col_header>(0.75)</col_6><col_7><col_header>time (secs)</col_7></row_1>
|
||||
<row_2><col_0><body>6</col_0><col_1><body>6</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.965 0.969</col_3><col_4><body>0.934 0.927</col_4><col_5><body>0.955 0.955</col_5><col_6><body>0.88 0.857</col_6><col_7><body>2.73 5.39</col_7></row_2>
|
||||
<row_3><col_0><body>4</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.938 0.952</col_3><col_4><body>0.904</col_4><col_5><body>0.927</col_5><col_6><body>0.853</col_6><col_7><body>1.97</col_7></row_3>
|
||||
<row_4><col_0><body>2</col_0><col_1><body>4</col_1><col_2><body>OTSL</col_2><col_3><body>0.923 0.945</col_3><col_4><body>0.909 0.897</col_4><col_5><body>0.938</col_5><col_6><body>0.843</col_6><col_7><body>3.77</col_7></row_4>
|
||||
<row_5><col_0><body></col_0><col_1><body></col_1><col_2><body>HTML</col_2><col_3><body></col_3><col_4><body>0.901</col_4><col_5><body>0.915 0.931</col_5><col_6><body>0.859 0.834</col_6><col_7><body>1.91 3.81</col_7></row_5>
|
||||
<row_6><col_0><body>4</col_0><col_1><body>2</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.952 0.944</col_3><col_4><body>0.92 0.903</col_4><col_5><body>0.942 0.931</col_5><col_6><body>0.857 0.824</col_6><col_7><body>1.22 2</col_7></row_6>
|
||||
</table>
|
||||
<section_header_level_1><location><page_9><loc_22><loc_35><loc_43><loc_36></location>5.2 Quantitative Results</section_header_level_1>
|
||||
<text><location><page_9><loc_22><loc_22><loc_79><loc_34></location>We picked the model parameter configuration that produced the best prediction quality (enc=6, dec=6, heads=8) with PubTabNet alone, then independently trained and evaluated it on three publicly available data sets: PubTabNet (395k samples), FinTabNet (113k samples) and PubTables-1M (about 1M samples). Performance results are presented in Table 2. It is clearly evident that the model trained on OTSL outperforms HTML across the board, keeping high TEDs and mAP scores even on difficult financial tables (FinTabNet) that contain sparse and large tables.</text>
|
||||
<text><location><page_9><loc_22><loc_16><loc_79><loc_22></location>Additionally, the results show that OTSL has an advantage over HTML when applied on a bigger data set like PubTables-1M and achieves significantly improved scores. Finally, OTSL achieves faster inference due to fewer decoding steps which is a result of the reduced sequence representation.</text>
|
||||
<table>
|
||||
<location><page_10><loc_23><loc_67><loc_77><loc_80></location>
|
||||
<caption>Table 2. TSR and cell detection results compared between OTSL and HTML on the PubTabNet [22], FinTabNet [21] and PubTables-1M [14] data sets using TableFormer [9] (with enc=6, dec=6, heads=8).</caption>
|
||||
<row_0><col_0><body></col_0><col_1><col_header>Language</col_1><col_2><col_header>TEDs</col_2><col_3><col_header>TEDs</col_3><col_4><col_header>TEDs</col_4><col_5><col_header>mAP(0.75)</col_5><col_6><col_header>Inference time (secs)</col_6></row_0>
|
||||
<row_1><col_0><body></col_0><col_1><col_header>Language</col_1><col_2><col_header>simple</col_2><col_3><col_header>complex</col_3><col_4><col_header>all</col_4><col_5><col_header>mAP(0.75)</col_5><col_6><col_header>Inference time (secs)</col_6></row_1>
|
||||
<row_2><col_0><row_header>PubTabNet</col_0><col_1><row_header>OTSL</col_1><col_2><body>0.965</col_2><col_3><body>0.934</col_3><col_4><body>0.955</col_4><col_5><body>0.88</col_5><col_6><body>2.73</col_6></row_2>
|
||||
<row_3><col_0><row_header>PubTabNet</col_0><col_1><row_header>HTML</col_1><col_2><body>0.969</col_2><col_3><body>0.927</col_3><col_4><body>0.955</col_4><col_5><body>0.857</col_5><col_6><body>5.39</col_6></row_3>
|
||||
<row_4><col_0><row_header>FinTabNet</col_0><col_1><row_header>OTSL</col_1><col_2><body>0.955</col_2><col_3><body>0.961</col_3><col_4><body>0.959</col_4><col_5><body>0.862</col_5><col_6><body>1.85</col_6></row_4>
|
||||
<row_5><col_0><row_header>FinTabNet</col_0><col_1><row_header>HTML</col_1><col_2><body>0.917</col_2><col_3><body>0.922</col_3><col_4><body>0.92</col_4><col_5><body>0.722</col_5><col_6><body>3.26</col_6></row_5>
|
||||
<row_6><col_0><row_header>PubTables-1M</col_0><col_1><row_header>OTSL</col_1><col_2><body>0.987</col_2><col_3><body>0.964</col_3><col_4><body>0.977</col_4><col_5><body>0.896</col_5><col_6><body>1.79</col_6></row_6>
|
||||
<row_7><col_0><row_header>PubTables-1M</col_0><col_1><row_header>HTML</col_1><col_2><body>0.983</col_2><col_3><body>0.944</col_3><col_4><body>0.966</col_4><col_5><body>0.889</col_5><col_6><body>3.26</col_6></row_7>
|
||||
</table>
|
||||
<section_header_level_1><location><page_10><loc_22><loc_62><loc_42><loc_64></location>5.3 Qualitative Results</section_header_level_1>
|
||||
<text><location><page_10><loc_22><loc_54><loc_79><loc_61></location>To illustrate the qualitative differences between OTSL and HTML, Figure 5 demonstrates less overlap and more accurate bounding boxes with OTSL. In Figure 6, OTSL proves to be more effective in handling tables with longer token sequences, resulting in even more precise structure prediction and bounding boxes.</text>
|
||||
<figure>
|
||||
<location><page_10><loc_27><loc_16><loc_74><loc_44></location>
|
||||
<caption>Fig. 5. The OTSL model produces more accurate bounding boxes with less overlap (E) than the HTML model (D), when predicting the structure of a sparse table (A), at twice the inference speed because of shorter sequence length (B),(C). "PMC2807444_006_00.png" PubTabNet.</caption>
|
||||
</figure>
|
||||
|
||||
|
||||
<figure>
|
||||
<location><page_11><loc_28><loc_20><loc_73><loc_77></location>
|
||||
<caption>Fig. 6. Visualization of predicted structure and detected bounding boxes on a complex table with many rows. The OTSL model (B) captured the repeating pattern of horizontally merged cells from the GT (A), unlike the HTML model (C). The HTML model also did not complete the HTML sequence correctly and displayed a lot more drift and overlap of bounding boxes. "PMC5406406_003_01.png" PubTabNet.</caption>
|
||||
</figure>
|
||||
<section_header_level_1><location><page_12><loc_22><loc_84><loc_36><loc_85></location>6 Conclusion</section_header_level_1>
|
||||
<text><location><page_12><loc_22><loc_74><loc_79><loc_81></location>We demonstrated that representing tables in HTML for the task of table structure recognition with Im2Seq models is ill-suited and has serious limitations. Furthermore, we presented in this paper an Optimized Table Structure Language (OTSL) which, when compared to commonly used general purpose languages, has several key benefits.</text>
|
||||
<text><location><page_12><loc_22><loc_59><loc_79><loc_74></location>First and foremost, given the same network configuration, inference time for a table-structure prediction is about 2 times faster compared to the conventional HTML approach. This is primarily owed to the shorter sequence length of the OTSL representation. Additional performance benefits can be obtained with HPO (hyper parameter optimization). As we demonstrate in our experiments, models trained on OTSL can be significantly smaller, e.g. by reducing the number of encoder and decoder layers, while preserving comparatively good prediction quality. This can further improve inference performance, yielding 5-6 times faster inference speed in OTSL with prediction quality comparable to models trained on HTML (see Table 1).</text>
|
||||
<text><location><page_12><loc_22><loc_41><loc_79><loc_59></location>Secondly, OTSL has more inherent structure and a significantly restricted vocabulary size. This allows autoregressive models to perform better in the TED metric, but especially with regard to the prediction accuracy of the table-cell bounding boxes (see Table 2). As shown in Figure 5, we observe that the OTSL drastically reduces the drift for table cell bounding boxes at high row count and in sparse tables. This leads to more accurate predictions and a significant reduction in post-processing complexity, which is an undesired necessity in HTML-based Im2Seq models. Significant novelty lies in the OTSL syntax rules, which are few, simple and always backwards-looking. Each new token can be validated only by analyzing the sequence of previous tokens, without requiring the entire sequence to detect mistakes. This in turn allows structural error detection and correction to be performed on-the-fly during sequence generation.</text>
|
||||
<section_header_level_1><location><page_12><loc_22><loc_36><loc_32><loc_38></location>References</section_header_level_1>
|
||||
<unordered_list>
|
||||
<list_item><location><page_12><loc_23><loc_29><loc_79><loc_34></location>1. Auer, C., Dolfi, M., Carvalho, A., Ramis, C.B., Staar, P.W.J.: Delivering document conversion as a cloud service with high throughput and responsiveness. CoRR abs/2206.00785 (2022). https://doi.org/10.48550/arXiv.2206.00785 , https://doi.org/10.48550/arXiv.2206.00785</list_item>
|
||||
<list_item><location><page_12><loc_23><loc_23><loc_79><loc_28></location>2. Chen, B., Peng, D., Zhang, J., Ren, Y., Jin, L.: Complex table structure recognition in the wild using transformer and identity matrix-based augmentation. In: Porwal, U., Fornés, A., Shafait, F. (eds.) Frontiers in Handwriting Recognition. pp. 545-561. Springer International Publishing, Cham (2022)</list_item>
|
||||
<list_item><location><page_12><loc_23><loc_20><loc_79><loc_23></location>3. Chi, Z., Huang, H., Xu, H.D., Yu, H., Yin, W., Mao, X.L.: Complicated table structure recognition. arXiv preprint arXiv:1908.04729 (2019)</list_item>
|
||||
<list_item><location><page_12><loc_23><loc_16><loc_79><loc_20></location>4. Deng, Y., Rosenberg, D., Mann, G.: Challenges in end-to-end neural scientific table recognition. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 894-901. IEEE (2019)</list_item>
|
||||
<text><loc_110><loc_324><loc_393><loc_405>The application of these rules gives OTSL a set of unique properties. First of all, the OTSL enforces a strictly rectangular structure representation, where every new-line token starts a new row. As a consequence, all rows and all columns have exactly the same number of tokens, irrespective of cell spans. Secondly, the OTSL representation is unambiguous: Every table structure is represented in one way. In this representation every table cell corresponds to a "C"-cell token, which in case of spans is always located in the top-left corner of the table cell definition. Third, OTSL syntax rules are only backward-looking. As a consequence, every predicted token can be validated straight during sequence generation by looking at the previously predicted sequence. As such, OTSL can guarantee that every predicted sequence is syntactically valid.</text>
|
||||
<text><loc_110><loc_407><loc_393><loc_420>These characteristics can be easily learned by sequence generator networks, as we demonstrate further below. We find strong indications that this pattern</text>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_114><loc_64>8</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<text><loc_110><loc_75><loc_393><loc_88>reduces significantly the column drift seen in the HTML based models (see Figure 5).</text>
|
||||
<section_header_level_1><loc_110><loc_102><loc_261><loc_108>4.3 Error-detection and -mitigation</section_header_level_1>
|
||||
<text><loc_110><loc_115><loc_393><loc_189>The design of OTSL makes it easy to validate a table structure on an unfinished sequence. The detection of an invalid sequence token is a clear indication of a prediction mistake; however, a valid sequence by itself does not guarantee prediction correctness. Different heuristics can be used to correct token errors in an invalid sequence and thus increase the chances for accurate predictions. Such heuristics can be applied either after the prediction of each token, or at the end, on the entire predicted sequence. For example, a simple heuristic which can correct the predicted OTSL sequence on-the-fly is to verify whether the token with the highest prediction confidence invalidates the predicted sequence, and if so, replace it with the token with the next highest confidence until the OTSL rules are satisfied.</text>
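A minimal sketch of that on-the-fly heuristic, assuming the decoder exposes a per-step probability for each OTSL token and reusing the hypothetical next_token_is_valid check from the syntax-rule sketch earlier in this file (this is not the authors' code):

```python
def pick_valid_token(step_probs, rows, current_row, is_valid):
    """Greedy correction: take the most confident token that keeps the
    partial OTSL sequence syntactically valid.

    step_probs -- mapping from each OTSL token to its predicted probability
    is_valid   -- callable(rows, current_row, token) -> bool syntax check
    """
    for token, _prob in sorted(step_probs.items(), key=lambda kv: -kv[1]):
        if is_valid(rows, current_row, token):
            return token
    raise ValueError("no syntactically valid token available at this step")

# The raw argmax "U" would be invalid in the first row, so the heuristic
# falls back to the next most confident valid token, "C".
probs = {"U": 0.52, "C": 0.40, "L": 0.05, "X": 0.02, "NL": 0.01}
print(pick_valid_token(probs, rows=[], current_row=["C"], is_valid=next_token_is_valid))
```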
|
||||
<section_header_level_1><loc_110><loc_203><loc_187><loc_209>5 Experiments</section_header_level_1>
|
||||
<text><loc_110><loc_219><loc_393><loc_285>To evaluate the impact of OTSL on prediction accuracy and inference times, we conducted a series of experiments based on the TableFormer model (Figure 4) with two objectives: Firstly we evaluate the prediction quality and performance of OTSL vs. HTML after performing Hyper Parameter Optimization (HPO) on the canonical PubTabNet data set. Secondly we pick the best hyper-parameters found in the first step and evaluate how OTSL impacts the performance of TableFormer after training on other publicly available data sets (FinTabNet, PubTables-1M [14]). The ground truth (GT) from all data sets has been converted into OTSL format for this purpose, and will be made publicly available.</text>
|
||||
<picture><loc_115><loc_321><loc_386><loc_375><caption><loc_110><loc_306><loc_393><loc_318>Fig. 4. Architecture sketch of the TableFormer model, which is a representative for the Im2Seq approach.</caption></picture>
|
||||
<text><loc_110><loc_392><loc_393><loc_420>We rely on standard metrics such as Tree Edit Distance score (TEDs) for table structure prediction, and Mean Average Precision (mAP) with 0.75 Intersection Over Union (IOU) threshold for the bounding-box predictions of table cells. The predicted OTSL structures were converted back to HTML format in</text>
|
||||
<page_break>
|
||||
<page_header><loc_159><loc_59><loc_366><loc_64>Optimized Table Tokenization for Table Structure Recognition</page_header>
|
||||
<page_header><loc_389><loc_59><loc_393><loc_64>9</page_header>
|
||||
<text><loc_110><loc_75><loc_393><loc_96>order to compute the TED score. Inference timing results for all experiments were obtained from the same machine on a single core with AMD EPYC 7763 CPU @2.45 GHz.</text>
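For the bounding-box part of the evaluation, a predicted cell box only counts as a match when its Intersection over Union with the ground-truth box reaches the 0.75 threshold. A small illustrative Python example of that IOU test (not the evaluation code used for the reported numbers):

```python
def iou(box_a, box_b):
    """Intersection over Union of two axis-aligned boxes given as (x0, y0, x1, y1)."""
    ix0, iy0 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix1, iy1 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix1 - ix0) * max(0.0, iy1 - iy0)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter) if inter else 0.0

pred, gt = (10, 10, 50, 30), (12, 10, 50, 32)
print(iou(pred, gt) >= 0.75)  # True: counts as a correct cell detection at IOU 0.75
```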
|
||||
<section_header_level_1><loc_110><loc_107><loc_260><loc_112>5.1 Hyper Parameter Optimization</section_header_level_1>
|
||||
<text><loc_110><loc_117><loc_393><loc_160>We have chosen the PubTabNet data set to perform HPO, since it includes a highly diverse set of tables. We also report TED scores separately for simple and complex tables (tables with cell spans). Results are presented in Table 1. It is evident that with OTSL, our model achieves the same TED score and slightly better mAP scores in comparison to HTML. However, OTSL yields a 2x speed-up in inference runtime over HTML.</text>
|
||||
<otsl><loc_114><loc_213><loc_388><loc_296><ched>#<ched>#<ched>Language<ched>TEDs<lcel><lcel><ched>mAP<ched>Inference<nl><ched>enc-layers<ched>dec-layers<ucel><ched>simple<ched>complex<ched>all<ched>(0.75)<ched>time (secs)<nl><fcel>6<fcel>6<fcel>OTSL HTML<fcel>0.965 0.969<fcel>0.934 0.927<fcel>0.955 0.955<fcel>0.88 0.857<fcel>2.73 5.39<nl><fcel>4<fcel>4<fcel>OTSL HTML<fcel>0.938 0.952<fcel>0.904<fcel>0.927<fcel>0.853<fcel>1.97<nl><fcel>2<fcel>4<fcel>OTSL<fcel>0.923 0.945<fcel>0.909 0.897<fcel>0.938<fcel>0.843<fcel>3.77<nl><ecel><ecel><fcel>HTML<ecel><fcel>0.901<fcel>0.915 0.931<fcel>0.859 0.834<fcel>1.91 3.81<nl><fcel>4<fcel>2<fcel>OTSL HTML<fcel>0.952 0.944<fcel>0.92 0.903<fcel>0.942 0.931<fcel>0.857 0.824<fcel>1.22 2<nl><caption><loc_110><loc_174><loc_393><loc_206>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption></otsl>
|
||||
<section_header_level_1><loc_110><loc_321><loc_216><loc_326>5.2 Quantitative Results</section_header_level_1>
|
||||
<text><loc_110><loc_331><loc_393><loc_390>We picked the model parameter configuration that produced the best prediction quality (enc=6, dec=6, heads=8) with PubTabNet alone, then independently trained and evaluated it on three publicly available data sets: PubTabNet (395k samples), FinTabNet (113k samples) and PubTables-1M (about 1M samples). Performance results are presented in Table 2. It is clearly evident that the model trained on OTSL outperforms HTML across the board, keeping high TEDs and mAP scores even on difficult financial tables (FinTabNet) that contain sparse and large tables.</text>
|
||||
<text><loc_110><loc_392><loc_393><loc_420>Additionally, the results show that OTSL has an advantage over HTML when applied on a bigger data set like PubTables-1M and achieves significantly improved scores. Finally, OTSL achieves faster inference due to fewer decoding steps which is a result of the reduced sequence representation.</text>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_118><loc_64>10</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<otsl><loc_117><loc_99><loc_385><loc_166><ecel><ched>Language<ched>TEDs<lcel><lcel><ched>mAP(0.75)<ched>Inference time (secs)<nl><ecel><ucel><ched>simple<ched>complex<ched>all<ucel><ucel><nl><rhed>PubTabNet<rhed>OTSL<fcel>0.965<fcel>0.934<fcel>0.955<fcel>0.88<fcel>2.73<nl><ucel><rhed>HTML<fcel>0.969<fcel>0.927<fcel>0.955<fcel>0.857<fcel>5.39<nl><rhed>FinTabNet<rhed>OTSL<fcel>0.955<fcel>0.961<fcel>0.959<fcel>0.862<fcel>1.85<nl><ucel><rhed>HTML<fcel>0.917<fcel>0.922<fcel>0.92<fcel>0.722<fcel>3.26<nl><rhed>PubTables-1M<rhed>OTSL<fcel>0.987<fcel>0.964<fcel>0.977<fcel>0.896<fcel>1.79<nl><ucel><rhed>HTML<fcel>0.983<fcel>0.944<fcel>0.966<fcel>0.889<fcel>3.26<nl><caption><loc_110><loc_73><loc_393><loc_92>Table 2. TSR and cell detection results compared between OTSL and HTML on the PubTabNet [22], FinTabNet [21] and PubTables-1M [14] data sets using TableFormer [9] (with enc=6, dec=6, heads=8).</caption></otsl>
|
||||
<section_header_level_1><loc_110><loc_182><loc_210><loc_188>5.3 Qualitative Results</section_header_level_1>
|
||||
<text><loc_110><loc_196><loc_393><loc_231>To illustrate the qualitative differences between OTSL and HTML, Figure 5 demonstrates less overlap and more accurate bounding boxes with OTSL. In Figure 6, OTSL proves to be more effective in handling tables with longer token sequences, resulting in even more precise structure prediction and bounding boxes.</text>
|
||||
<picture><loc_133><loc_281><loc_369><loc_419><caption><loc_110><loc_251><loc_393><loc_278>Fig. 5. The OTSL model produces more accurate bounding boxes with less overlap (E) than the HTML model (D), when predicting the structure of a sparse table (A), at twice the inference speed because of shorter sequence length (B),(C). "PMC2807444_006_00.png" PubTabNet.</caption></picture>
|
||||
|
||||
|
||||
<page_break>
|
||||
<page_header><loc_159><loc_59><loc_366><loc_64>Optimized Table Tokenization for Table Structure Recognition</page_header>
|
||||
<page_header><loc_385><loc_59><loc_393><loc_64>11</page_header>
|
||||
<picture><loc_138><loc_115><loc_365><loc_400><caption><loc_110><loc_79><loc_393><loc_112>Fig. 6. Visualization of predicted structure and detected bounding boxes on a complex table with many rows. The OTSL model (B) captured the repeating pattern of horizontally merged cells from the GT (A), unlike the HTML model (C). The HTML model also did not complete the HTML sequence correctly and displayed a lot more drift and overlap of bounding boxes. "PMC5406406_003_01.png" PubTabNet.</caption></picture>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_118><loc_64>12</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<section_header_level_1><loc_110><loc_74><loc_179><loc_81>6 Conclusion</section_header_level_1>
|
||||
<text><loc_110><loc_93><loc_393><loc_128>We demonstrated that representing tables in HTML for the task of table structure recognition with Im2Seq models is ill-suited and has serious limitations. Furthermore, we presented in this paper an Optimized Table Structure Language (OTSL) which, when compared to commonly used general purpose languages, has several key benefits.</text>
|
||||
<text><loc_110><loc_131><loc_393><loc_204>First and foremost, given the same network configuration, inference time for a table-structure prediction is about 2 times faster compared to the conventional HTML approach. This is primarily owed to the shorter sequence length of the OTSL representation. Additional performance benefits can be obtained with HPO (hyper parameter optimization). As we demonstrate in our experiments, models trained on OTSL can be significantly smaller, e.g. by reducing the number of encoder and decoder layers, while preserving comparatively good prediction quality. This can further improve inference performance, yielding 5-6 times faster inference speed in OTSL with prediction quality comparable to models trained on HTML (see Table 1).</text>
|
||||
<text><loc_110><loc_207><loc_393><loc_296>Secondly, OTSL has more inherent structure and a significantly restricted vocabulary size. This allows autoregressive models to perform better in the TED metric, but especially with regard to the prediction accuracy of the table-cell bounding boxes (see Table 2). As shown in Figure 5, we observe that the OTSL drastically reduces the drift for table cell bounding boxes at high row count and in sparse tables. This leads to more accurate predictions and a significant reduction in post-processing complexity, which is an undesired necessity in HTML-based Im2Seq models. Significant novelty lies in the OTSL syntax rules, which are few, simple and always backwards-looking. Each new token can be validated only by analyzing the sequence of previous tokens, without requiring the entire sequence to detect mistakes. This in turn allows structural error detection and correction to be performed on-the-fly during sequence generation.</text>
|
||||
<section_header_level_1><loc_110><loc_312><loc_162><loc_318>References</section_header_level_1>
|
||||
<unordered_list><list_item><loc_114><loc_330><loc_393><loc_356>1. Auer, C., Dolfi, M., Carvalho, A., Ramis, C.B., Staar, P.W.J.: Delivering document conversion as a cloud service with high throughput and responsiveness. CoRR abs/2206.00785 (2022). https://doi.org/10.48550/arXiv.2206.00785 , https://doi.org/10.48550/arXiv.2206.00785</list_item>
|
||||
<list_item><loc_114><loc_358><loc_393><loc_384>2. Chen, B., Peng, D., Zhang, J., Ren, Y., Jin, L.: Complex table structure recognition in the wild using transformer and identity matrix-based augmentation. In: Porwal, U., Fornés, A., Shafait, F. (eds.) Frontiers in Handwriting Recognition. pp. 545-561. Springer International Publishing, Cham (2022)</list_item>
|
||||
<list_item><loc_114><loc_386><loc_393><loc_398>3. Chi, Z., Huang, H., Xu, H.D., Yu, H., Yin, W., Mao, X.L.: Complicated table structure recognition. arXiv preprint arXiv:1908.04729 (2019)</list_item>
|
||||
<list_item><loc_114><loc_401><loc_393><loc_420>4. Deng, Y., Rosenberg, D., Mann, G.: Challenges in end-to-end neural scientific table recognition. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 894-901. IEEE (2019)</list_item>
|
||||
</unordered_list>
|
||||
<unordered_list>
|
||||
<list_item><location><page_13><loc_23><loc_81><loc_79><loc_85></location>5. Kayal, P., Anand, M., Desai, H., Singh, M.: Tables to latex: structure and content extraction from scientific tables. International Journal on Document Analysis and Recognition (IJDAR) pp. 1-10 (2022)</list_item>
|
||||
<list_item><location><page_13><loc_23><loc_76><loc_79><loc_81></location>6. Lee, E., Kwon, J., Yang, H., Park, J., Lee, S., Koo, H.I., Cho, N.I.: Table structure recognition based on grid shape graph. In: 2022 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC). pp. 1868-1873. IEEE (2022)</list_item>
|
||||
<list_item><location><page_13><loc_23><loc_73><loc_79><loc_75></location>7. Li, M., Cui, L., Huang, S., Wei, F., Zhou, M., Li, Z.: Tablebank: A benchmark dataset for table detection and recognition (2019)</list_item>
|
||||
<list_item><location><page_13><loc_23><loc_66><loc_79><loc_72></location>8. Livathinos, N., Berrospi, C., Lysak, M., Kuropiatnyk, V., Nassar, A., Carvalho, A., Dolfi, M., Auer, C., Dinkla, K., Staar, P.: Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence 35 (17), 15137-15145 (May 2021), https://ojs.aaai.org/index.php/ AAAI/article/view/17777</list_item>
|
||||
<list_item><location><page_13><loc_23><loc_62><loc_79><loc_66></location>9. Nassar, A., Livathinos, N., Lysak, M., Staar, P.: Tableformer: Table structure understanding with transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4614-4623 (June 2022)</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_53><loc_79><loc_61></location>10. Pfitzmann, B., Auer, C., Dolfi, M., Nassar, A.S., Staar, P.W.J.: Doclaynet: A large human-annotated dataset for document-layout segmentation. In: Zhang, A., Rangwala, H. (eds.) KDD '22: The 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Washington, DC, USA, August 14 - 18, 2022. pp. 3743-3751. ACM (2022). https://doi.org/10.1145/3534678.3539043 , https:// doi.org/10.1145/3534678.3539043</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_48><loc_79><loc_53></location>11. Prasad, D., Gadpal, A., Kapadni, K., Visave, M., Sultanpure, K.: Cascadetabnet: An approach for end to end table detection and structure recognition from image-based documents. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 572-573 (2020)</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_42><loc_79><loc_48></location>12. Schreiber, S., Agne, S., Wolf, I., Dengel, A., Ahmed, S.: Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In: 2017 14th IAPR international conference on document analysis and recognition (ICDAR). vol. 1, pp. 1162-1167. IEEE (2017)</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_37><loc_79><loc_42></location>13. Siddiqui, S.A., Fateh, I.A., Rizvi, S.T.R., Dengel, A., Ahmed, S.: Deeptabstr: Deep learning based table structure recognition. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 1403-1409 (2019). https:// doi.org/10.1109/ICDAR.2019.00226</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_31><loc_79><loc_36></location>14. Smock, B., Pesala, R., Abraham, R.: PubTables-1M: Towards comprehensive table extraction from unstructured documents. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4634-4642 (June 2022)</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_23><loc_79><loc_31></location>15. Staar, P.W.J., Dolfi, M., Auer, C., Bekas, C.: Corpus conversion service: A machine learning platform to ingest documents at scale. In: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. pp. 774-782. KDD '18, Association for Computing Machinery, New York, NY, USA (2018). https://doi.org/10.1145/3219819.3219834 , https://doi.org/10. 1145/3219819.3219834</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_20><loc_79><loc_23></location>16. Wang, X.: Tabular Abstraction, Editing, and Formatting. Ph.D. thesis, CAN (1996), aAINN09397</list_item>
|
||||
<list_item><location><page_13><loc_22><loc_16><loc_79><loc_20></location>17. Xue, W., Li, Q., Tao, D.: Res2tim: Reconstruct syntactic structures from table images. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 749-755. IEEE (2019)</list_item>
|
||||
<page_break>
|
||||
<page_header><loc_159><loc_59><loc_366><loc_64>Optimized Table Tokenization for Table Structure Recognition</page_header>
|
||||
<page_header><loc_385><loc_59><loc_393><loc_64>13</page_header>
|
||||
<unordered_list><list_item><loc_114><loc_76><loc_393><loc_94>5. Kayal, P., Anand, M., Desai, H., Singh, M.: Tables to latex: structure and content extraction from scientific tables. International Journal on Document Analysis and Recognition (IJDAR) pp. 1-10 (2022)</list_item>
|
||||
<list_item><loc_114><loc_96><loc_393><loc_122>6. Lee, E., Kwon, J., Yang, H., Park, J., Lee, S., Koo, H.I., Cho, N.I.: Table structure recognition based on grid shape graph. In: 2022 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC). pp. 1868-1873. IEEE (2022)</list_item>
|
||||
<list_item><loc_114><loc_124><loc_393><loc_136>7. Li, M., Cui, L., Huang, S., Wei, F., Zhou, M., Li, Z.: Tablebank: A benchmark dataset for table detection and recognition (2019)</list_item>
|
||||
<list_item><loc_114><loc_138><loc_393><loc_171>8. Livathinos, N., Berrospi, C., Lysak, M., Kuropiatnyk, V., Nassar, A., Carvalho, A., Dolfi, M., Auer, C., Dinkla, K., Staar, P.: Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence 35 (17), 15137-15145 (May 2021), https://ojs.aaai.org/index.php/ AAAI/article/view/17777</list_item>
|
||||
<list_item><loc_114><loc_172><loc_393><loc_191>9. Nassar, A., Livathinos, N., Lysak, M., Staar, P.: Tableformer: Table structure understanding with transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4614-4623 (June 2022)</list_item>
|
||||
<list_item><loc_110><loc_193><loc_393><loc_233>10. Pfitzmann, B., Auer, C., Dolfi, M., Nassar, A.S., Staar, P.W.J.: Doclaynet: A large human-annotated dataset for document-layout segmentation. In: Zhang, A., Rangwala, H. (eds.) KDD '22: The 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Washington, DC, USA, August 14 - 18, 2022. pp. 3743-3751. ACM (2022). https://doi.org/10.1145/3534678.3539043 , https:// doi.org/10.1145/3534678.3539043</list_item>
|
||||
<list_item><loc_110><loc_235><loc_393><loc_261>11. Prasad, D., Gadpal, A., Kapadni, K., Visave, M., Sultanpure, K.: Cascadetabnet: An approach for end to end table detection and structure recognition from image-based documents. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 572-573 (2020)</list_item>
|
||||
<list_item><loc_110><loc_262><loc_393><loc_288>12. Schreiber, S., Agne, S., Wolf, I., Dengel, A., Ahmed, S.: Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In: 2017 14th IAPR international conference on document analysis and recognition (ICDAR). vol. 1, pp. 1162-1167. IEEE (2017)</list_item>
|
||||
<list_item><loc_110><loc_290><loc_393><loc_316>13. Siddiqui, S.A., Fateh, I.A., Rizvi, S.T.R., Dengel, A., Ahmed, S.: Deeptabstr: Deep learning based table structure recognition. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 1403-1409 (2019). https:// doi.org/10.1109/ICDAR.2019.00226</list_item>
|
||||
<list_item><loc_110><loc_318><loc_393><loc_344>14. Smock, B., Pesala, R., Abraham, R.: PubTables-1M: Towards comprehensive table extraction from unstructured documents. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4634-4642 (June 2022)</list_item>
|
||||
<list_item><loc_110><loc_345><loc_393><loc_385>15. Staar, P.W.J., Dolfi, M., Auer, C., Bekas, C.: Corpus conversion service: A machine learning platform to ingest documents at scale. In: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. pp. 774-782. KDD '18, Association for Computing Machinery, New York, NY, USA (2018). https://doi.org/10.1145/3219819.3219834 , https://doi.org/10. 1145/3219819.3219834</list_item>
|
||||
<list_item><loc_110><loc_387><loc_393><loc_399>16. Wang, X.: Tabular Abstraction, Editing, and Formatting. Ph.D. thesis, CAN (1996), aAINN09397</list_item>
|
||||
<list_item><loc_110><loc_401><loc_393><loc_420>17. Xue, W., Li, Q., Tao, D.: Res2tim: Reconstruct syntactic structures from table images. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 749-755. IEEE (2019)</list_item>
|
||||
</unordered_list>
|
||||
<unordered_list>
|
||||
<list_item><location><page_14><loc_22><loc_81><loc_79><loc_85></location>18. Xue, W., Yu, B., Wang, W., Tao, D., Li, Q.: Tgrnet: A table graph reconstruction network for table structure recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1295-1304 (2021)</list_item>
|
||||
<list_item><location><page_14><loc_22><loc_76><loc_79><loc_81></location>19. Ye, J., Qi, X., He, Y., Chen, Y., Gu, D., Gao, P., Xiao, R.: Pingan-vcgroup's solution for icdar 2021 competition on scientific literature parsing task b: Table recognition to html (2021). https://doi.org/10.48550/ARXIV.2105.01848 , https://arxiv.org/abs/2105.01848</list_item>
|
||||
<list_item><location><page_14><loc_22><loc_73><loc_79><loc_75></location>20. Zhang, Z., Zhang, J., Du, J., Wang, F.: Split, embed and merge: An accurate table structure recognizer. Pattern Recognition 126 , 108565 (2022)</list_item>
|
||||
<list_item><location><page_14><loc_22><loc_66><loc_79><loc_72></location>21. Zheng, X., Burdick, D., Popa, L., Zhong, X., Wang, N.X.R.: Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. In: 2021 IEEE Winter Conference on Applications of Computer Vision (WACV). pp. 697-706 (2021). https://doi.org/10.1109/WACV48630.2021. 00074</list_item>
|
||||
<list_item><location><page_14><loc_22><loc_60><loc_79><loc_66></location>22. Zhong, X., ShafieiBavani, E., Jimeno Yepes, A.: Image-based table recognition: Data, model, and evaluation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.M. (eds.) Computer Vision - ECCV 2020. pp. 564-580. Springer International Publishing, Cham (2020)</list_item>
|
||||
<list_item><location><page_14><loc_22><loc_56><loc_79><loc_60></location>23. Zhong, X., Tang, J., Yepes, A.J.: Publaynet: largest dataset ever for document layout analysis. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 1015-1022. IEEE (2019)</list_item>
|
||||
</document>
|
||||
<page_break>
|
||||
<page_header><loc_110><loc_59><loc_118><loc_64>14</page_header>
|
||||
<page_header><loc_137><loc_59><loc_189><loc_64>M. Lysak, et al.</page_header>
|
||||
<unordered_list><list_item><loc_110><loc_76><loc_393><loc_94>18. Xue, W., Yu, B., Wang, W., Tao, D., Li, Q.: Tgrnet: A table graph reconstruction network for table structure recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1295-1304 (2021)</list_item>
|
||||
<list_item><loc_110><loc_96><loc_393><loc_122>19. Ye, J., Qi, X., He, Y., Chen, Y., Gu, D., Gao, P., Xiao, R.: Pingan-vcgroup's solution for icdar 2021 competition on scientific literature parsing task b: Table recognition to html (2021). https://doi.org/10.48550/ARXIV.2105.01848 , https://arxiv.org/abs/2105.01848</list_item>
|
||||
<list_item><loc_110><loc_124><loc_393><loc_136>20. Zhang, Z., Zhang, J., Du, J., Wang, F.: Split, embed and merge: An accurate table structure recognizer. Pattern Recognition 126 , 108565 (2022)</list_item>
|
||||
<list_item><loc_110><loc_138><loc_393><loc_171>21. Zheng, X., Burdick, D., Popa, L., Zhong, X., Wang, N.X.R.: Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. In: 2021 IEEE Winter Conference on Applications of Computer Vision (WACV). pp. 697-706 (2021). https://doi.org/10.1109/WACV48630.2021. 00074</list_item>
|
||||
<list_item><loc_110><loc_172><loc_393><loc_198>22. Zhong, X., ShafieiBavani, E., Jimeno Yepes, A.: Image-based table recognition: Data, model, and evaluation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.M. (eds.) Computer Vision - ECCV 2020. pp. 564-580. Springer International Publishing, Cham (2020)</list_item>
|
||||
<list_item><loc_110><loc_200><loc_393><loc_219>23. Zhong, X., Tang, J., Yepes, A.J.: Publaynet: largest dataset ever for document layout analysis. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 1015-1022. IEEE (2019)</list_item>
|
||||
</unordered_list>
|
||||
</doctag>
|
@ -1,23 +1,17 @@
|
||||
<document>
|
||||
<text><location><page_1><loc_12><loc_88><loc_53><loc_94></location>pulleys, provided the inner race of the bearing is clamped to the supporting structure by the nut and bolt. Plates must be attached to the structure in a positive manner to eliminate rotation or misalignment when tightening the bolts or screws.</text>
|
||||
<text><location><page_1><loc_12><loc_77><loc_53><loc_86></location>The two general types of self-locking nuts currently in use are the all-metal type and the fiber lock type. For the sake of simplicity, only three typical kinds of self-locking nuts are considered in this handbook: the Boots self-locking and the stainless steel self-locking nuts, representing the all-metal types; and the elastic stop nut, representing the fiber insert type.</text>
|
||||
<section_header_level_1><location><page_1><loc_12><loc_73><loc_28><loc_75></location>Boots Self-Locking Nut</section_header_level_1>
|
||||
<text><location><page_1><loc_12><loc_64><loc_54><loc_73></location>The Boots self-locking nut is of one piece, all-metal construction designed to hold tight despite severe vibration. Note in Figure 7-26 that it has two sections and is essentially two nuts in one: a locking nut and a load-carrying nut. The two sections are connected with a spring, which is an integral part of the nut.</text>
|
||||
<text><location><page_1><loc_12><loc_52><loc_53><loc_62></location>The spring keeps the locking and load-carrying sections such a distance apart that the two sets of threads are out of phase or spaced so that a bolt, which has been screwed through the load-carrying section, must push the locking section outward against the force of the spring to engage the threads of the locking section properly.</text>
|
||||
<text><location><page_1><loc_12><loc_38><loc_54><loc_50></location>The spring, through the medium of the locking section, exerts a constant locking force on the bolt in the same direction as a force that would tighten the nut. In this nut, the load-carrying section has the thread strength of a standard nut of comparable size, while the locking section presses against the threads of the bolt and locks the nut firmly in position. Only a wrench applied to the nut loosens it. The nut can be removed and reused without impairing its efficiency.</text>
|
||||
<text><location><page_1><loc_12><loc_33><loc_53><loc_36></location>Boots self-locking nuts are made with three different spring styles and in various shapes and sizes. The wing type that is</text>
|
||||
<figure>
|
||||
<location><page_1><loc_12><loc_10><loc_52><loc_31></location>
|
||||
<caption>Figure 7-26. Self-locking nuts.</caption>
|
||||
</figure>
|
||||
<text><location><page_1><loc_54><loc_85><loc_95><loc_94></location>the most common ranges in size for No. 6 up to 1 / 4 inch, the Rol-top ranges from 1 / 4 inch to 1 / 6 inch, and the bellows type ranges in size from No. 8 up to 3 / 8 inch. Wing-type nuts are made of anodized aluminum alloy, cadmium-plated carbon steel, or stainless steel. The Rol-top nut is cadmium-plated steel, and the bellows type is made of aluminum alloy only.</text>
|
||||
<text><location><page_1><loc_54><loc_83><loc_55><loc_85></location>.</text>
|
||||
<section_header_level_1><location><page_1><loc_54><loc_82><loc_76><loc_83></location>Stainless Steel Self-Locking Nut</section_header_level_1>
|
||||
<text><location><page_1><loc_54><loc_54><loc_96><loc_81></location>The stainless steel self-locking nut may be spun on and off by hand as its locking action takes place only when the nut is seated against a solid surface and tightened. The nut consists of two parts: a case with a beveled locking shoulder and key and a thread insert with a locking shoulder and slotted keyway. Until the nut is tightened, it spins on the bolt easily, because the threaded insert is the proper size for the bolt. However, when the nut is seated against a solid surface and tightened, the locking shoulder of the insert is pulled downward and wedged against the locking shoulder of the case. This action compresses the threaded insert and causes it to clench the bolt tightly. The cross-sectional view in Figure 7-27 shows how the key of the case fits into the slotted keyway of the insert so that when the case is turned, the threaded insert is turned with it. Note that the slot is wider than the key. This permits the slot to be narrowed and the insert to be compressed when the nut is tightened.</text>
|
||||
<section_header_level_1><location><page_1><loc_54><loc_51><loc_65><loc_52></location>Elastic Stop Nut</section_header_level_1>
|
||||
<text><location><page_1><loc_54><loc_47><loc_93><loc_50></location>The elastic stop nut is a standard nut with the height increased to accommodate a fiber locking collar. This</text>
|
||||
<figure>
|
||||
<location><page_1><loc_54><loc_11><loc_94><loc_46></location>
|
||||
<caption>Figure 7-27. Stainless steel self-locking nut.</caption>
|
||||
</figure>
|
||||
</document>
|
||||
<doctag><text><loc_61><loc_28><loc_264><loc_60>pulleys, provided the inner race of the bearing is clamped to the supporting structure by the nut and bolt. Plates must be attached to the structure in a positive manner to eliminate rotation or misalignment when tightening the bolts or screws.</text>
|
||||
<text><loc_61><loc_69><loc_264><loc_116>The two general types of self-locking nuts currently in use are the all-metal type and the fiber lock type. For the sake of simplicity, only three typical kinds of self-locking nuts are considered in this handbook: the Boots self-locking and the stainless steel self-locking nuts, representing the all-metal types; and the elastic stop nut, representing the fiber insert type.</text>
|
||||
<section_header_level_1><loc_61><loc_125><loc_141><loc_133>Boots Self-Locking Nut</section_header_level_1>
|
||||
<text><loc_61><loc_134><loc_268><loc_182>The Boots self-locking nut is of one piece, all-metal construction designed to hold tight despite severe vibration. Note in Figure 7-26 that it has two sections and is essentially two nuts in one: a locking nut and a load-carrying nut. The two sections are connected with a spring, which is an integral part of the nut.</text>
|
||||
<text><loc_61><loc_191><loc_267><loc_239>The spring keeps the locking and load-carrying sections such a distance apart that the two sets of threads are out of phase or spaced so that a bolt, which has been screwed through the load-carrying section, must push the locking section outward against the force of the spring to engage the threads of the locking section properly.</text>
|
||||
<text><loc_61><loc_248><loc_268><loc_311>The spring, through the medium of the locking section, exerts a constant locking force on the bolt in the same direction as a force that would tighten the nut. In this nut, the load-carrying section has the thread strength of a standard nut of comparable size, while the locking section presses against the threads of the bolt and locks the nut firmly in position. Only a wrench applied to the nut loosens it. The nut can be removed and reused without impairing its efficiency.</text>
|
||||
<text><loc_61><loc_320><loc_264><loc_336>Boots self-locking nuts are made with three different spring styles and in various shapes and sizes. The wing type that is</text>
|
||||
<picture><loc_59><loc_343><loc_261><loc_449><caption><loc_61><loc_454><loc_155><loc_461>Figure 7-26. Self-locking nuts.</caption></picture>
|
||||
<text><loc_270><loc_28><loc_473><loc_76>the most common ranges in size for No. 6 up to 1 / 4 inch, the Rol-top ranges from 1 / 4 inch to 1 / 6 inch, and the bellows type ranges in size from No. 8 up to 3 / 8 inch. Wing-type nuts are made of anodized aluminum alloy, cadmium-plated carbon steel, or stainless steel. The Rol-top nut is cadmium-plated steel, and the bellows type is made of aluminum alloy only.</text>
|
||||
<text><loc_270><loc_77><loc_274><loc_84>.</text>
|
||||
<section_header_level_1><loc_270><loc_85><loc_380><loc_92>Stainless Steel Self-Locking Nut</section_header_level_1>
|
||||
<text><loc_270><loc_94><loc_478><loc_231>The stainless steel self-locking nut may be spun on and off by hand as its locking action takes place only when the nut is seated against a solid surface and tightened. The nut consists of two parts: a case with a beveled locking shoulder and key and a thread insert with a locking shoulder and slotted keyway. Until the nut is tightened, it spins on the bolt easily, because the threaded insert is the proper size for the bolt. However, when the nut is seated against a solid surface and tightened, the locking shoulder of the insert is pulled downward and wedged against the locking shoulder of the case. This action compresses the threaded insert and causes it to clench the bolt tightly. The cross-sectional view in Figure 7-27 shows how the key of the case fits into the slotted keyway of the insert so that when the case is turned, the threaded insert is turned with it. Note that the slot is wider than the key. This permits the slot to be narrowed and the insert to be compressed when the nut is tightened.</text>
|
||||
<section_header_level_1><loc_270><loc_240><loc_327><loc_247>Elastic Stop Nut</section_header_level_1>
|
||||
<text><loc_270><loc_249><loc_465><loc_264>The elastic stop nut is a standard nut with the height increased to accommodate a fiber locking collar. This</text>
|
||||
<picture><loc_270><loc_272><loc_470><loc_447><caption><loc_270><loc_452><loc_405><loc_459>Figure 7-27. Stainless steel self-locking nut.</caption></picture>
|
||||
<page_footer><loc_453><loc_470><loc_472><loc_478>7-45</page_footer>
|
||||
</doctag>
|
@ -1,16 +1,18 @@
|
||||
<document>
|
||||
<section_header_level_1><location><page_1><loc_22><loc_83><loc_52><loc_84></location>JavaScript Code Example</section_header_level_1>
|
||||
<text><location><page_1><loc_22><loc_63><loc_78><loc_81></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><location><page_1><loc_22><loc_57><loc_78><loc_63></location>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet,</text>
|
||||
<paragraph><location><page_1><loc_36><loc_55><loc_63><loc_56></location>Listing 1: Simple JavaScript Program</paragraph>
|
||||
<code><location><page_1><loc_22><loc_49><loc_43><loc_54></location>function add(a, b) { return a + b; } console.log(add(3, 5));</code>
|
||||
<text><location><page_1><loc_22><loc_29><loc_78><loc_47></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><location><page_1><loc_22><loc_23><loc_78><loc_29></location>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet,</text>
|
||||
<section_header_level_1><location><page_2><loc_22><loc_84><loc_32><loc_85></location>Formula</section_header_level_1>
|
||||
<text><location><page_2><loc_22><loc_66><loc_80><loc_82></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><location><page_2><loc_22><loc_58><loc_80><loc_65></location>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt.</text>
|
||||
<formula><location><page_2><loc_47><loc_56><loc_56><loc_57></location></formula>
|
||||
<text><location><page_2><loc_22><loc_38><loc_80><loc_55></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><location><page_2><loc_22><loc_29><loc_80><loc_37></location>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.</text>
|
||||
<text><location><page_2><loc_22><loc_21><loc_80><loc_29></location>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.</text>
|
||||
</document>
|
||||
<doctag><section_header_level_1><loc_109><loc_79><loc_258><loc_87>JavaScript Code Example</section_header_level_1>
|
||||
<text><loc_109><loc_94><loc_390><loc_183>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><loc_109><loc_185><loc_390><loc_213>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet,</text>
|
||||
<paragraph><loc_182><loc_221><loc_317><loc_226>Listing 1: Simple JavaScript Program</paragraph>
|
||||
<code><loc_110><loc_231><loc_215><loc_257><_unknown_>function add(a, b) { return a + b; } console.log(add(3, 5));</code>
|
||||
<text><loc_109><loc_265><loc_390><loc_353>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><loc_109><loc_355><loc_390><loc_383>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet,</text>
|
||||
<page_footer><loc_248><loc_439><loc_252><loc_445>1</page_footer>
|
||||
<page_break>
|
||||
<section_header_level_1><loc_112><loc_74><loc_161><loc_82>Formula</section_header_level_1>
|
||||
<text><loc_112><loc_89><loc_401><loc_172>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><loc_112><loc_174><loc_401><loc_208>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt.</text>
|
||||
<formula><loc_236><loc_215><loc_278><loc_222></formula>
|
||||
<text><loc_112><loc_227><loc_401><loc_311>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<text><loc_112><loc_313><loc_401><loc_353>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.</text>
|
||||
<text><loc_112><loc_355><loc_401><loc_396>Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.</text>
|
||||
<page_footer><loc_255><loc_413><loc_259><loc_418>1</page_footer>
|
||||
</doctag>
|
File diff suppressed because one or more lines are too long
@ -1,15 +1,11 @@
|
||||
<document>
|
||||
<section_header_level_1><location><page_1><loc_22><loc_83><loc_41><loc_84></location>Figures Example</section_header_level_1>
|
||||
<text><location><page_1><loc_22><loc_63><loc_78><loc_81></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<figure>
|
||||
<location><page_1><loc_22><loc_36><loc_78><loc_62></location>
|
||||
<caption>Figure 1: This is an example image.</caption>
|
||||
</figure>
|
||||
<text><location><page_1><loc_22><loc_15><loc_78><loc_30></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua.</text>
|
||||
<text><location><page_2><loc_22><loc_66><loc_78><loc_84></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<figure>
|
||||
<location><page_2><loc_36><loc_36><loc_64><loc_65></location>
|
||||
<caption>Figure 2: This is an example image.</caption>
|
||||
</figure>
|
||||
<text><location><page_2><loc_22><loc_15><loc_78><loc_31></location>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum.</text>
|
||||
</document>
|
||||
<doctag><section_header_level_1><loc_109><loc_79><loc_206><loc_87>Figures Example</section_header_level_1>
|
||||
<text><loc_109><loc_94><loc_390><loc_183>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<picture><loc_110><loc_192><loc_389><loc_322><caption><loc_185><loc_334><loc_314><loc_340>Figure 1: This is an example image.</caption></picture>
|
||||
<text><loc_109><loc_349><loc_390><loc_423>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua.</text>
|
||||
<page_footer><loc_248><loc_439><loc_252><loc_445>1</page_footer>
|
||||
<page_break>
|
||||
<text><loc_109><loc_81><loc_390><loc_169>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.</text>
|
||||
<picture><loc_179><loc_176><loc_320><loc_321><caption><loc_185><loc_330><loc_314><loc_336>Figure 2: This is an example image.</caption></picture>
|
||||
<text><loc_109><loc_345><loc_390><loc_426>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum.</text>
|
||||
<page_footer><loc_248><loc_439><loc_252><loc_445>2</page_footer>
|
||||
</doctag>
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,9 +1,8 @@
|
||||
<document>
|
||||
<section_header_level_1><location><page_1><loc_37><loc_89><loc_85><loc_91></location>Pythonو R ةغلب ةجمربلا للاخ نم تلاكشملا لحو ةيجاتنلإا نيسحت</section_header_level_1>
|
||||
<text><location><page_1><loc_15><loc_80><loc_85><loc_87></location>Python و R ةغلب ةجمربلا ربتعت ةلاعف لولح داجيإ يف دعاستو ةيجاتنلإا ززعت نأ نكمي يتلا ةيوقلا تاودلأا نم ءاملعلاو نيللحملا ىلع لهسي امم ،تانايبلا ليلحتل ةيلاثم اهلعجت ةديرف تازيمPython و R نم لك كلتمي .تلاكشملل ناك اذإ .ةلاعفو ةعيرس ةقيرطب ةدقعم تلايلحت ءارجإ مهسي نأ نكمي تاغللا هذه مادختسا نإف ،ةيليلحت ةيلقع كيدل .لمعلا جئاتن نيسحت يف ريبك لكشب</text>
|
||||
<text><location><page_1><loc_34><loc_73><loc_34><loc_75></location>ً</text>
|
||||
<text><location><page_1><loc_16><loc_71><loc_85><loc_78></location>جارختساو تانايبلا نم ةلئاه تايمك ةجلاعم نكمملا نم حبصي ،ةجمربلا تاراهم عم يليلحتلا ريكفتلا عمتجي امدنع ذيفنتلPython و R مادختسا نيجمربملل نكمي .اهنم تاهجوتلاو طامنلأا ةجذمنلا لثم ،ةمدقتم ةيليلحت تايلمع ةقد رثكأ تارارق ذاختا ىلإ ا ضيأ يدؤي نأ نكمي لب ،تقولا رفوي طقف سيل اذه .ةريبكلا تانايبلا ليلحتو ةيئاصحلإا تانايبلا ىلع ةمئاق تاجاتنتسا ىلع ءانب .</text>
|
||||
<text><location><page_1><loc_83><loc_71><loc_83><loc_73></location>ً</text>
|
||||
<text><location><page_1><loc_15><loc_63><loc_85><loc_70></location>ليلحتلا نم ،تاقيبطتلا نم ةعساو ةعومجم معدت ةينغ تاودأو تابتكمPython و R نم لك رفوت ،كلذ ىلع ةولاع ىلع .ةفلتخملا تلاكشملل ةركتبم لولح ريوطتل تابتكملا هذه نم ةدافتسلاا نيمدختسملل نكمي .يللآا ملعتلا ىلإ ينايبلا R رفوت امنيب ،ةءافكب تانايبلا ةرادلإ Python يف pandas ةبتكم مادختسا نكمي ،لاثملا ليبس مسرلل ةيوق تاودأ .نيللحملاو نيثحابلل ةيلاثم اهلعجي امم ،يئاصحلإا ليلحتلاو ينايبلا</text>
|
||||
<text><location><page_1><loc_16><loc_56><loc_85><loc_61></location>Python و R ةغلب ةجمربلا يدؤت نأ نكمي ،ةياهنلا يف ةركتبم لولح ريفوتو ةيجاتنلإا نيسحت ىلإ ةيليلحت ةيلقع عم اهل نوكت نأ نكمي ةبسانملا ةيجمربلا بيلاسلأا قيبطتو لاعف لكشب تانايبلا ليلحت ىلع ةردقلا نإ .ةدقعملا تلاكشملل .ينهملاو يصخشلا ءادلأا ىلع ىدملا ةديعب ةيباجيإ تاريثأت</text>
|
||||
</document>
|
||||
<doctag><section_header_level_1><loc_183><loc_46><loc_426><loc_55>Pythonو R ةغلب ةجمربلا للاخ نم تلاكشملا لحو ةيجاتنلإا نيسحت</section_header_level_1>
|
||||
<text><loc_74><loc_64><loc_427><loc_99>Python و R ةغلب ةجمربلا ربتعت ةلاعف لولح داجيإ يف دعاستو ةيجاتنلإا ززعت نأ نكمي يتلا ةيوقلا تاودلأا نم ءاملعلاو نيللحملا ىلع لهسي امم ،تانايبلا ليلحتل ةيلاثم اهلعجت ةديرف تازيمPython و R نم لك كلتمي .تلاكشملل ناك اذإ .ةلاعفو ةعيرس ةقيرطب ةدقعم تلايلحت ءارجإ مهسي نأ نكمي تاغللا هذه مادختسا نإف ،ةيليلحت ةيلقع كيدل .لمعلا جئاتن نيسحت يف ريبك لكشب</text>
|
||||
<text><loc_170><loc_126><loc_170><loc_134>ً</text>
|
||||
<text><loc_82><loc_108><loc_427><loc_143>جارختساو تانايبلا نم ةلئاه تايمك ةجلاعم نكمملا نم حبصي ،ةجمربلا تاراهم عم يليلحتلا ريكفتلا عمتجي امدنع ذيفنتلPython و R مادختسا نيجمربملل نكمي .اهنم تاهجوتلاو طامنلأا ةجذمنلا لثم ،ةمدقتم ةيليلحت تايلمع ةقد رثكأ تارارق ذاختا ىلإ ا ضيأ يدؤي نأ نكمي لب ،تقولا رفوي طقف سيل اذه .ةريبكلا تانايبلا ليلحتو ةيئاصحلإا تانايبلا ىلع ةمئاق تاجاتنتسا ىلع ءانب .</text>
|
||||
<text><loc_416><loc_135><loc_416><loc_143>ً</text>
|
||||
<text><loc_76><loc_152><loc_427><loc_186>ليلحتلا نم ،تاقيبطتلا نم ةعساو ةعومجم معدت ةينغ تاودأو تابتكمPython و R نم لك رفوت ،كلذ ىلع ةولاع ىلع .ةفلتخملا تلاكشملل ةركتبم لولح ريوطتل تابتكملا هذه نم ةدافتسلاا نيمدختسملل نكمي .يللآا ملعتلا ىلإ ينايبلا R رفوت امنيب ،ةءافكب تانايبلا ةرادلإ Python يف pandas ةبتكم مادختسا نكمي ،لاثملا ليبس مسرلل ةيوق تاودأ .نيللحملاو نيثحابلل ةيلاثم اهلعجي امم ،يئاصحلإا ليلحتلاو ينايبلا</text>
|
||||
<text><loc_79><loc_195><loc_427><loc_221>Python و R ةغلب ةجمربلا يدؤت نأ نكمي ،ةياهنلا يف ةركتبم لولح ريفوتو ةيجاتنلإا نيسحت ىلإ ةيليلحت ةيلقع عم اهل نوكت نأ نكمي ةبسانملا ةيجمربلا بيلاسلأا قيبطتو لاعف لكشب تانايبلا ليلحت ىلع ةردقلا نإ .ةدقعملا تلاكشملل .ينهملاو يصخشلا ءادلأا ىلع ىدملا ةديعب ةيباجيإ تاريثأت</text>
|
||||
</doctag>
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,10 +1,7 @@
|
||||
<document>
|
||||
<text><location><page_1><loc_8><loc_3><loc_10><loc_4></location>11</text>
|
||||
<text><location><page_1><loc_11><loc_50><loc_73><loc_75></location>،هيلعو ملا ةوا رملا لاول خواهييع ووص عضت ةيرص م لا ةموكح لا نإف ةو اب لأا نم ددي قي حت ىاي لمعلخب خال ةير وام جلا سي ئر د يسلا فياكت ا دو ه :خاسعر ىاي ويولولأا ةومئخق سعر ىا ي يرصملا نخسنلإا ءخهب فام عضو ، تخ ووومن تحدووعم قووي حت ىوو اي لو وم علا ،ليوواعللاو ةحووصلا تحخووجم اووف ةووصخل ىوووواي خوووو حلا ا وووو و ،تخوووو ي خل لا فوووواذع اووووف ةامخوووو و ةمادلووووسمو ةوووويوق وو يلودلاو ةوويمياقلإا تخيدوو حلل ا ءوووض اووف يرووصملا امووو لا نووملأا تاددووحم ،ة وو ام ةووعبخلم رارملووساو ،ةيووسخيسلا ة رخوواملا ر ي وو و لت د ووواو ةاووصاومو تخ ايوووو لاو ةوووفخ لا تخووو ام ريوووولت ، خوووهرلإا ةوووحفخ كمو ر ار لوووسحاو نوووملأا لي هخووو م وووسري ي ووولا وووو حهل ا ىووواي لدووولعملا اهيدووو لا خووولبلاو ،اه،وووولا .اعملجملا ماسلاو ةه،اوملا</text>
|
||||
<text><location><page_1><loc_13><loc_45><loc_74><loc_48></location>رول لا لاول ةيرو ص م لا ةو موكحلا امخونرب دالوسي ،قبس خمل خً فوو 2024( -)2026 اتلآا وحهلا ىاي اهو ،ةسيئر ةيجيتارلسا اد هع ةعبرع قي حت :</text>
|
||||
<text><location><page_1><loc_12><loc_37><loc_73><loc_40></location>نــــــــم ما ةــــــــيا م رـ صم لا يم وقل ا اــــسن ا ءاــــ نب رــــــــــــــــــــصم لا عاـــــصت ا ءاـــــ نب يــــــــــــــــــــــسبا نت قتسظا ق يقحت را ر يــــــــــــــــــــــــساي سلا</text>
|
||||
<text><location><page_1><loc_12><loc_23><loc_73><loc_31></location>خهلوسحخب امخونرب لا ت خفدالوسم ديدحت لت دق هن ع ىلإ رخ لإا ردجت لكواب د روووصم ةو ووي ر تخ فدال ووو س م ىووو اي سيوووئر 2023 ر اوو وو حلا تخووو ساو تخيوووصوتو ، كيال ا تخ اووصيل اه،ووولا امخوونربلاو ،تارا ووو لا ت خ فدا لوو سمو ،اه،ووولا ،ةوو ي ا ةيه، ولا تخ ي جيتا رلسحا فالبمو .</text>
|
||||
<figure>
|
||||
<location><page_1><loc_75><loc_23><loc_100><loc_76></location>
|
||||
</figure>
|
||||
</document>
|
||||
<doctag><text><loc_40><loc_478><loc_49><loc_486>11</text>
|
||||
<text><loc_57><loc_125><loc_367><loc_249>،هيلعو ملا ةوا رملا لاول خواهييع ووص عضت ةيرص م لا ةموكح لا نإف ةو اب لأا نم ددي قي حت ىاي لمعلخب خال ةير وام جلا سي ئر د يسلا فياكت ا دو ه :خاسعر ىاي ويولولأا ةومئخق سعر ىا ي يرصملا نخسنلإا ءخهب فام عضو ، تخ ووومن تحدووعم قووي حت ىوو اي لو وم علا ،ليوواعللاو ةحووصلا تحخووجم اووف ةووصخل ىوووواي خوووو حلا ا وووو و ،تخوووو ي خل لا فوووواذع اووووف ةامخوووو و ةمادلووووسمو ةوووويوق وو يلودلاو ةوويمياقلإا تخيدوو حلل ا ءوووض اووف يرووصملا امووو لا نووملأا تاددووحم ،ة وو ام ةووعبخلم رارملووساو ،ةيووسخيسلا ة رخوواملا ر ي وو و لت د ووواو ةاووصاومو تخ ايوووو لاو ةوووفخ لا تخووو ام ريوووولت ، خوووهرلإا ةوووحفخ كمو ر ار لوووسحاو نوووملأا لي هخووو م وووسري ي ووولا وووو حهل ا ىووواي لدووولعملا اهيدووو لا خووولبلاو ،اه،وووولا .اعملجملا ماسلاو ةه،اوملا</text>
|
||||
<text><loc_63><loc_258><loc_370><loc_277>رول لا لاول ةيرو ص م لا ةو موكحلا امخونرب دالوسي ،قبس خمل خً فوو 2024( -)2026 اتلآا وحهلا ىاي اهو ،ةسيئر ةيجيتارلسا اد هع ةعبرع قي حت :</text>
|
||||
<text><loc_58><loc_301><loc_367><loc_317>نــــــــم ما ةــــــــيا م رـ صم لا يم وقل ا اــــسن ا ءاــــ نب رــــــــــــــــــــصم لا عاـــــصت ا ءاـــــ نب يــــــــــــــــــــــسبا نت قتسظا ق يقحت را ر يــــــــــــــــــــــــساي سلا</text>
|
||||
<text><loc_61><loc_344><loc_367><loc_385>خهلوسحخب امخونرب لا ت خفدالوسم ديدحت لت دق هن ع ىلإ رخ لإا ردجت لكواب د روووصم ةو ووي ر تخ فدال ووو س م ىووو اي سيوووئر 2023 ر اوو وو حلا تخووو ساو تخيوووصوتو ، كيال ا تخ اووصيل اه،ووولا امخوونربلاو ،تارا ووو لا ت خ فدا لوو سمو ،اه،ووولا ،ةوو ي ا ةيه، ولا تخ ي جيتا رلسحا فالبمو .</text>
|
||||
<picture><loc_375><loc_119><loc_500><loc_386></picture>
|
||||
</doctag>
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,35 +1,33 @@
|
||||
<document>
|
||||
<section_header_level_1><location><page_1><loc_12><loc_90><loc_45><loc_93></location>یلخاد یلااک - یلصا رازاب رد شريذپ همانديما</section_header_level_1>
|
||||
<figure>
|
||||
<location><page_1><loc_65><loc_88><loc_81><loc_96></location>
|
||||
</figure>
|
||||
<section_header_level_1><location><page_1><loc_63><loc_81><loc_81><loc_84></location>لااک درادناتسا -2-5</section_header_level_1>
|
||||
<text><location><page_1><loc_77><loc_79><loc_87><loc_81></location>درادناتسا مان</text>
|
||||
<text><location><page_1><loc_11><loc_75><loc_44><loc_81></location>یرگ هتخير شور هب هدش ديلوت لاشمش و هشمش فرصم دروم هتسويپ یا هزاس یاهدلاوف رد - قباطم تسويپ زيلانآ</text>
|
||||
<text><location><page_1><loc_71><loc_72><loc_87><loc_74></location>یلم درادناتسا هرامش</text>
|
||||
<text><location><page_1><loc_40><loc_73><loc_45><loc_74></location>20300</text>
|
||||
<text><location><page_1><loc_68><loc_70><loc_87><loc_72></location>؟تسا یرابجا درادناتسا</text>
|
||||
<checkbox_unselected><location><page_1><loc_33><loc_70><loc_44><loc_72></location>ريخ یلب</checkbox_unselected>
|
||||
<text><location><page_1><loc_65><loc_67><loc_87><loc_69></location>درادناتسا هدننکرداص عجرم</text>
|
||||
<text><location><page_1><loc_28><loc_67><loc_44><loc_69></location>ناريا درادناتسا یلم نامزاس</text>
|
||||
<text><location><page_1><loc_49><loc_62><loc_87><loc_66></location>ذخا ار روکذم درادناتسا ،لوصحم هدننکديلوت ايآ ؟تسا هدومن</text>
|
||||
<checkbox_selected><location><page_1><loc_33><loc_65><loc_35><loc_66></location>ريخ</checkbox_selected>
|
||||
<checkbox_unselected><location><page_1><loc_40><loc_65><loc_42><loc_66></location>یلب</checkbox_unselected>
|
||||
<section_header_level_1><location><page_1><loc_69><loc_56><loc_85><loc_58></location>سروب رد شريذپ -3</section_header_level_1>
|
||||
<text><location><page_1><loc_68><loc_54><loc_83><loc_56></location>کرادم هئارا خيرات</text>
|
||||
<text><location><page_1><loc_23><loc_54><loc_32><loc_56></location>1403/09/19</text>
|
||||
<text><location><page_1><loc_72><loc_51><loc_83><loc_53></location>شريذپ خيرات</text>
|
||||
<text><location><page_1><loc_23><loc_51><loc_32><loc_53></location>1403/10/04</text>
|
||||
<text><location><page_1><loc_62><loc_48><loc_83><loc_50></location>هضرع هتيمک هسلج هرامش</text>
|
||||
<text><location><page_1><loc_26><loc_49><loc_29><loc_50></location>436</text>
|
||||
<text><location><page_1><loc_67><loc_45><loc_83><loc_47></location>همانديما جرد خيرات</text>
|
||||
<text><location><page_1><loc_23><loc_46><loc_32><loc_48></location>1403/10/05</text>
|
||||
<text><location><page_1><loc_71><loc_43><loc_83><loc_45></location>شريذپ رواشم</text>
|
||||
<text><location><page_1><loc_21><loc_43><loc_34><loc_45></location>سروب نومرآ یرازگراک</text>
|
||||
<text><location><page_1><loc_47><loc_37><loc_83><loc_42></location>رد لااک شريذپ زا سپ هياپ تميق نييعت ةوحن سروب</text>
|
||||
<text><location><page_1><loc_18><loc_40><loc_36><loc_42></location>یناهج یاه تميق ساسا رب</text>
|
||||
<text><location><page_1><loc_45><loc_32><loc_83><loc_37></location>شورف /شورف لک /ديلوت زا هضرع دصرد لقادح یلخاد</text>
|
||||
<text><location><page_1><loc_14><loc_35><loc_40><loc_37></location>نت 47.500 اي هنايلاس ديلوت زا %50 لقادح</text>
|
||||
<text><location><page_1><loc_68><loc_29><loc_83><loc_31></location>ليوحت زاجم یاطخ</text>
|
||||
<text><location><page_1><loc_18><loc_30><loc_37><loc_31></location>ليوحت لباق هلومحم نيرخآ 5%</text>
|
||||
</document>
|
||||
<doctag><section_header_level_1><loc_58><loc_37><loc_225><loc_48>یلخاد یلااک - یلصا رازاب رد شريذپ همانديما</section_header_level_1>
|
||||
<picture><loc_326><loc_21><loc_405><loc_61></picture>
|
||||
<section_header_level_1><loc_314><loc_82><loc_403><loc_93>لااک درادناتسا -2-5</section_header_level_1>
|
||||
<text><loc_385><loc_96><loc_436><loc_106>درادناتسا مان</text>
|
||||
<text><loc_56><loc_96><loc_222><loc_125>یرگ هتخير شور هب هدش ديلوت لاشمش و هشمش فرصم دروم هتسويپ یا هزاس یاهدلاوف رد - قباطم تسويپ زيلانآ</text>
|
||||
<text><loc_354><loc_128><loc_436><loc_138>یلم درادناتسا هرامش</text>
|
||||
<text><loc_199><loc_128><loc_223><loc_136>20300</text>
|
||||
<text><loc_342><loc_142><loc_436><loc_152>؟تسا یرابجا درادناتسا</text>
|
||||
<checkbox_unselected><loc_166><loc_141><loc_222><loc_149>ريخ یلب</checkbox_unselected>
|
||||
<text><loc_327><loc_155><loc_436><loc_165>درادناتسا هدننکرداص عجرم</text>
|
||||
<text><loc_140><loc_154><loc_222><loc_163>ناريا درادناتسا یلم نامزاس</text>
|
||||
<text><loc_245><loc_169><loc_436><loc_192>ذخا ار روکذم درادناتسا ،لوصحم هدننکديلوت ايآ ؟تسا هدومن</text>
|
||||
<checkbox_selected><loc_166><loc_168><loc_175><loc_176>ريخ</checkbox_selected>
|
||||
<checkbox_unselected><loc_199><loc_168><loc_208><loc_176>یلب</checkbox_unselected>
|
||||
<section_header_level_1><loc_344><loc_209><loc_425><loc_219>سروب رد شريذپ -3</section_header_level_1>
|
||||
<text><loc_340><loc_222><loc_414><loc_232>کرادم هئارا خيرات</text>
|
||||
<text><loc_116><loc_221><loc_158><loc_230>1403/09/19</text>
|
||||
<text><loc_358><loc_236><loc_414><loc_246>شريذپ خيرات</text>
<text><loc_116><loc_235><loc_158><loc_243>1403/10/04</text>
<text><loc_308><loc_249><loc_414><loc_259>هضرع هتيمک هسلج هرامش</text>
<text><loc_130><loc_248><loc_144><loc_257>436</text>
<text><loc_335><loc_263><loc_414><loc_273>همانديما جرد خيرات</text>
<text><loc_116><loc_262><loc_158><loc_270>1403/10/05</text>
<text><loc_355><loc_276><loc_414><loc_286>شريذپ رواشم</text>
<text><loc_103><loc_275><loc_171><loc_283>سروب نومرآ یرازگراک</text>
<text><loc_236><loc_291><loc_414><loc_314>رد لااک شريذپ زا سپ هياپ تميق نييعت ةوحن سروب</text>
<text><loc_92><loc_290><loc_179><loc_298>یناهج یاه تميق ساسا رب</text>
<text><loc_224><loc_317><loc_414><loc_340>شورف /شورف لک /ديلوت زا هضرع دصرد لقادح یلخاد</text>
<text><loc_72><loc_316><loc_202><loc_325>نت 47.500 اي هنايلاس ديلوت زا %50 لقادح</text>
<text><loc_340><loc_344><loc_414><loc_354>ليوحت زاجم یاطخ</text>
<text><loc_90><loc_343><loc_184><loc_351>ليوحت لباق هلومحم نيرخآ 5%</text>
<page_footer><loc_224><loc_463><loc_247><loc_469>Page 7</page_footer>
</doctag>
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1 +1 @@
{"_name": "", "type": "pdf-document", "description": {"title": null, "abstract": null, "authors": null, "affiliations": null, "subjects": null, "keywords": null, "publication_date": null, "languages": null, "license": null, "publishers": null, "url_refs": null, "references": null, "publication": null, "reference_count": null, "citation_count": null, "citation_date": null, "advanced": null, "analytics": null, "logs": [], "collection": null, "acquisition": null}, "file-info": {"filename": "ocr_test.pdf", "filename-prov": null, "document-hash": "80f38f5b87a84870681556176a9622186fd200dd32c5557be9e0c0af05b8bc61", "#-pages": 1, "collection-name": null, "description": null, "page-hashes": [{"hash": "14d896dc8bcb7ee7c08c0347eb6be8dcb92a3782501992f1ea14d2e58077d4e3", "model": "default", "page": 1}]}, "main-text": [{"prov": [{"bbox": [69.6796646118164, 689.012451171875, 504.87200927734375, 764.9216918945312], "page": 1, "span": [0, 94], "__ref_s3_data": null}], "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package", "type": "paragraph", "payload": null, "name": "Text", "font": null}], "figures": [], "tables": [], "bitmaps": null, "equations": [], "footnotes": [], "page-dimensions": [{"height": 841.9216918945312, "page": 1, "width": 595.201171875}], "page-footers": [], "page-headers": [], "_s3_data": null, "identifiers": null}
{"_name": "", "type": "pdf-document", "description": {"title": null, "abstract": null, "authors": null, "affiliations": null, "subjects": null, "keywords": null, "publication_date": null, "languages": null, "license": null, "publishers": null, "url_refs": null, "references": null, "publication": null, "reference_count": null, "citation_count": null, "citation_date": null, "advanced": null, "analytics": null, "logs": [], "collection": null, "acquisition": null}, "file-info": {"filename": "ocr_test.pdf", "filename-prov": null, "document-hash": "80f38f5b87a84870681556176a9622186fd200dd32c5557be9e0c0af05b8bc61", "#-pages": 1, "collection-name": null, "description": null, "page-hashes": [{"hash": "14d896dc8bcb7ee7c08c0347eb6be8dcb92a3782501992f1ea14d2e58077d4e3", "model": "default", "page": 1}]}, "main-text": [{"prov": [{"bbox": [72.33333587646484, 691.58837890625, 503.3333435058594, 763.9216918945312], "page": 1, "span": [0, 94], "__ref_s3_data": null}], "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package", "type": "paragraph", "payload": null, "name": "Text", "font": null}], "figures": [], "tables": [], "bitmaps": null, "equations": [], "footnotes": [], "page-dimensions": [{"height": 841.9216918945312, "page": 1, "width": 595.201171875}], "page-footers": [], "page-headers": [], "_s3_data": null, "identifiers": null}
@ -1 +1 @@
[{"page_no": 0, "size": {"width": 595.201171875, "height": 841.9216918945312}, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "predictions": {"layout": {"clusters": [{"id": 0, "label": "text", "bbox": {"l": 69.6796630536824, "t": 76.99999977896756, "r": 504.8720051760782, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "children": []}]}, "tablestructure": {"table_map": {}}, "figures_classification": null, "equations_prediction": null}, "assembled": {"elements": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 69.6796630536824, "t": 76.99999977896756, "r": 504.8720051760782, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "body": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 69.6796630536824, "t": 76.99999977896756, "r": 504.8720051760782, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "headers": []}}]
[{"page_no": 0, "size": {"width": 595.201171875, "height": 841.9216918945312}, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "predictions": {"layout": {"clusters": [{"id": 0, "label": "text", "bbox": {"l": 72.33333333333333, "t": 78.0, "r": 503.3333333333333, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "children": []}]}, "tablestructure": {"table_map": {}}, "figures_classification": null, "equations_prediction": null}, "assembled": {"elements": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 72.33333333333333, "t": 78.0, "r": 503.3333333333333, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "body": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 72.33333333333333, "t": 78.0, "r": 503.3333333333333, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "headers": []}}]
@ -1,3 +1,2 @@
<document>
<text><location><page_1><loc_12><loc_82><loc_85><loc_91></location>Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package</text>
</document>
<doctag><text><loc_61><loc_46><loc_423><loc_89>Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package</text>
</doctag>
@ -1 +1 @@
{"schema_name": "DoclingDocument", "version": "1.1.0", "name": "ocr_test", "origin": {"mimetype": "application/pdf", "binary_hash": 14853448746796404529, "filename": "ocr_test.pdf", "uri": null}, "furniture": {"self_ref": "#/furniture", "parent": null, "children": [], "content_layer": "furniture", "name": "_root_", "label": "unspecified"}, "body": {"self_ref": "#/body", "parent": null, "children": [{"cref": "#/texts/0"}], "content_layer": "body", "name": "_root_", "label": "unspecified"}, "groups": [], "texts": [{"self_ref": "#/texts/0", "parent": {"cref": "#/body"}, "children": [], "content_layer": "body", "label": "text", "prov": [{"page_no": 1, "bbox": {"l": 69.6796646118164, "t": 764.9216918945312, "r": 504.87200927734375, "b": 689.012451171875, "coord_origin": "BOTTOMLEFT"}, "charspan": [0, 94]}], "orig": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package", "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "pictures": [], "tables": [], "key_value_items": [], "pages": {"1": {"size": {"width": 595.201171875, "height": 841.9216918945312}, "image": null, "page_no": 1}}}
{"schema_name": "DoclingDocument", "version": "1.1.0", "name": "ocr_test", "origin": {"mimetype": "application/pdf", "binary_hash": 14853448746796404529, "filename": "ocr_test.pdf", "uri": null}, "furniture": {"self_ref": "#/furniture", "parent": null, "children": [], "content_layer": "furniture", "name": "_root_", "label": "unspecified"}, "body": {"self_ref": "#/body", "parent": null, "children": [{"cref": "#/texts/0"}], "content_layer": "body", "name": "_root_", "label": "unspecified"}, "groups": [], "texts": [{"self_ref": "#/texts/0", "parent": {"cref": "#/body"}, "children": [], "content_layer": "body", "label": "text", "prov": [{"page_no": 1, "bbox": {"l": 72.33333587646484, "t": 763.9216918945312, "r": 503.3333435058594, "b": 691.58837890625, "coord_origin": "BOTTOMLEFT"}, "charspan": [0, 94]}], "orig": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package", "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "pictures": [], "tables": [], "key_value_items": [], "pages": {"1": {"size": {"width": 595.201171875, "height": 841.9216918945312}, "image": null, "page_no": 1}}}
@ -1 +1 @@
[{"page_no": 0, "size": {"width": 595.201171875, "height": 841.9216918945312}, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "predictions": {"layout": {"clusters": [{"id": 0, "label": "text", "bbox": {"l": 69.6796630536824, "t": 76.99999977896756, "r": 504.8720051760782, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "children": []}]}, "tablestructure": {"table_map": {}}, "figures_classification": null, "equations_prediction": null}, "assembled": {"elements": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 69.6796630536824, "t": 76.99999977896756, "r": 504.8720051760782, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "body": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 69.6796630536824, "t": 76.99999977896756, "r": 504.8720051760782, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 73.34702132031646, "t": 76.99999977896756, "r": 503.64955224479564, "b": 97.99999977896755, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 69.6796630536824, "t": 104.00000011573796, "r": 504.8720051760782, "b": 124.83139494707741, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 71.84193505100733, "t": 129.797125232046, "r": 153.088934155825, "b": 152.90926970226084, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "headers": []}}]
[{"page_no": 0, "size": {"width": 595.201171875, "height": 841.9216918945312}, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "predictions": {"layout": {"clusters": [{"id": 0, "label": "text", "bbox": {"l": 72.33333333333333, "t": 78.0, "r": 503.3333333333333, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "children": []}]}, "tablestructure": {"table_map": {}}, "figures_classification": null, "equations_prediction": null}, "assembled": {"elements": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 72.33333333333333, "t": 78.0, "r": 503.3333333333333, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "body": [{"label": "text", "id": 0, "page_no": 0, "cluster": {"id": 0, "label": "text", "bbox": {"l": 72.33333333333333, "t": 78.0, "r": 503.3333333333333, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}, "confidence": 0.9715733528137207, "cells": [{"id": 0, "text": "Docling bundles PDF document conversion to", "bbox": {"l": 74.0, "t": 78.0, "r": 503.3333333333333, "b": 96.66666666666667, "coord_origin": "TOPLEFT"}}, {"id": 1, "text": "JSON and Markdown in an easy self contained", "bbox": {"l": 72.33333333333333, "t": 104.66666666666667, "r": 503.3333333333333, "b": 123.33333333333333, "coord_origin": "TOPLEFT"}}, {"id": 2, "text": "package", "bbox": {"l": 73.66666666666667, "t": 131.66666666666666, "r": 150.66666666666666, "b": 150.33333333333334, "coord_origin": "TOPLEFT"}}], "children": []}, "text": "Docling bundles PDF document conversion to JSON and Markdown in an easy self contained package"}], "headers": []}}]