feat: new torch-based docling models (#120)

---------

Signed-off-by: Maxim Lysak <mly@zurich.ibm.com>
Co-authored-by: Maxim Lysak <mly@zurich.ibm.com>
Maxim Lysak 2024-10-03 18:42:33 +02:00 committed by GitHub
parent 9ebbbc1245
commit 2422f706a1
30 changed files with 1159 additions and 1185 deletions


@ -67,7 +67,10 @@ class DocumentConverter:
from huggingface_hub import snapshot_download
download_path = snapshot_download(
repo_id="ds4sd/docling-models", force_download=force, local_dir=local_dir
repo_id="ds4sd/docling-models",
force_download=force,
local_dir=local_dir,
revision="v2.0.0",
)
return Path(download_path)
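
For reference, the pinned download can be reproduced outside docling with nothing but `huggingface_hub`; a minimal sketch mirroring the new call (the wrapper name is illustrative):

```python
from pathlib import Path

from huggingface_hub import snapshot_download


def fetch_docling_models(force: bool = False, local_dir: str | None = None) -> Path:
    # Pinning revision="v2.0.0" guarantees the torch-based weights are
    # fetched even after the Hugging Face repo's default branch moves on.
    download_path = snapshot_download(
        repo_id="ds4sd/docling-models",
        force_download=force,
        local_dir=local_dir,
        revision="v2.0.0",
    )
    return Path(download_path)
```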


@ -33,6 +33,7 @@ class LayoutModel:
"Page-footer",
"Code",
"List-item",
# "Title"
# "Formula",
]
PAGE_HEADER_LABELS = ["Page-header", "Page-footer"]
@ -69,9 +70,7 @@ class LayoutModel:
"Key-Value Region": 0.45,
}
-CLASS_REMAPPINGS = {
-"Document Index": "Table",
-}
+CLASS_REMAPPINGS = {"Document Index": "Table", "Title": "Section-header"}
_log.debug("================= Start postprocess function ====================")
start_time = time.time()
@ -277,6 +276,7 @@ class LayoutModel:
bbox=BoundingBox.model_validate(pred_item),
cells=[],
)
+clusters.append(cluster)
# Map cells to clusters
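
The remapping table folds classes the downstream assembly does not handle into broader ones. A toy sketch of how such a table is applied to raw layout predictions (the prediction dicts and helper are hypothetical, not docling's actual code):

```python
CLASS_REMAPPINGS = {"Document Index": "Table", "Title": "Section-header"}


def remap_label(label: str) -> str:
    # Labels with an entry are folded into their broader class;
    # everything else passes through unchanged.
    return CLASS_REMAPPINGS.get(label, label)


predictions = [{"label": "Document Index"}, {"label": "Title"}, {"label": "Text"}]
for pred in predictions:
    pred["label"] = remap_label(pred["label"])
# labels are now "Table", "Section-header", "Text"
```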


@ -8,7 +8,7 @@ from docling.pipeline.base_model_pipeline import BaseModelPipeline
class StandardModelPipeline(BaseModelPipeline):
-_layout_model_path = "model_artifacts/layout/beehive_v0.0.5"
+_layout_model_path = "model_artifacts/layout/beehive_v0.0.5_pt"
_table_model_path = "model_artifacts/tableformer"
def __init__(self, artifacts_path: Path, pipeline_options: PipelineOptions):
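
At runtime the class-level paths are presumably joined onto the downloaded artifacts directory; a sketch of what the rename amounts to (the join is inferred from the constructor signature, not shown in this hunk):

```python
from pathlib import Path

artifacts_path = Path("/path/to/docling-models")  # wherever snapshot_download landed
layout_dir = artifacts_path / "model_artifacts/layout/beehive_v0.0.5_pt"
table_dir = artifacts_path / "model_artifacts/tableformer"
# The new "_pt" suffix selects the PyTorch layout weights that replace the
# ONNX variant; accordingly, onnxruntime disappears from the lockfile below.
```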

poetry.lock generated

@ -565,23 +565,6 @@ files = [
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "coloredlogs"
version = "15.0.1"
description = "Colored terminal output for Python's logging module"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"},
{file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"},
]
[package.dependencies]
humanfriendly = ">=9.1"
[package.extras]
cron = ["capturer (>=2.4)"]
[[package]]
name = "comm"
version = "0.2.2"
@ -983,13 +966,13 @@ tabulate = ">=0.9.0,<0.10.0"
[[package]]
name = "docling-ibm-models"
version = "1.3.1"
version = "2.0.0"
description = "This package contains the AI models used by the Docling PDF conversion package"
optional = false
python-versions = "<4.0,>=3.10"
files = [
{file = "docling_ibm_models-1.3.1-py3-none-any.whl", hash = "sha256:1d9e15f2e79333e74847d2cfec573c2c3a5ecb969feb8f75a78768d474e29c16"},
{file = "docling_ibm_models-1.3.1.tar.gz", hash = "sha256:e7bf7c6eceab498e8828790aabe0968c293f4c51c3326a19483da3fb7abd367b"},
{file = "docling_ibm_models-2.0.0-py3-none-any.whl", hash = "sha256:ce40e70ef0dfd88bd80d4ee7c20b9d2f803b126f51ac171d9e38997942668dbc"},
{file = "docling_ibm_models-2.0.0.tar.gz", hash = "sha256:3db5e089ee138d2799acba21bfa8b6ac3e1cdbc2d50aeb608337934bdf24f69a"},
]
[package.dependencies]
@ -998,7 +981,6 @@ jsonlines = ">=3.1.0,<4.0.0"
lxml = ">=4.9.1,<5.0.0"
mean_average_precision = ">=2021.4.26.0,<2022.0.0.0"
numpy = ">=1.24.4,<2.0.0"
onnxruntime = ">=1.16.2,<2.0.0"
opencv-python-headless = ">=4.6.0.66,<5.0.0.0"
Pillow = ">=10.0.0,<11.0.0"
torch = [
@ -1216,17 +1198,6 @@ TOMLi = {version = "*", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["pyTest", "pyTest-cov"]
[[package]]
name = "flatbuffers"
version = "24.3.25"
description = "The FlatBuffers serialization format for Python"
optional = false
python-versions = "*"
files = [
{file = "flatbuffers-24.3.25-py2.py3-none-any.whl", hash = "sha256:8dbdec58f935f3765e4f7f3cf635ac3a77f83568138d6a2311f524ec96364812"},
{file = "flatbuffers-24.3.25.tar.gz", hash = "sha256:de2ec5b203f21441716617f38443e0a8ebf3d25bf0d9c0bb0ce68fa00ad546a4"},
]
[[package]]
name = "fonttools"
version = "4.53.1"
@ -1692,20 +1663,6 @@ testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gr
torch = ["safetensors", "torch"]
typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
[[package]]
name = "humanfriendly"
version = "10.0"
description = "Human friendly output for text interfaces using Python"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"},
{file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"},
]
[package.dependencies]
pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""}
[[package]]
name = "identify"
version = "2.6.1"
@ -3573,48 +3530,6 @@ files = [
{file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"},
]
[[package]]
name = "onnxruntime"
version = "1.19.2"
description = "ONNX Runtime is a runtime accelerator for Machine Learning models"
optional = false
python-versions = "*"
files = [
{file = "onnxruntime-1.19.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:84fa57369c06cadd3c2a538ae2a26d76d583e7c34bdecd5769d71ca5c0fc750e"},
{file = "onnxruntime-1.19.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdc471a66df0c1cdef774accef69e9f2ca168c851ab5e4f2f3341512c7ef4666"},
{file = "onnxruntime-1.19.2-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e3a4ce906105d99ebbe817f536d50a91ed8a4d1592553f49b3c23c4be2560ae6"},
{file = "onnxruntime-1.19.2-cp310-cp310-win32.whl", hash = "sha256:4b3d723cc154c8ddeb9f6d0a8c0d6243774c6b5930847cc83170bfe4678fafb3"},
{file = "onnxruntime-1.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:17ed7382d2c58d4b7354fb2b301ff30b9bf308a1c7eac9546449cd122d21cae5"},
{file = "onnxruntime-1.19.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d863e8acdc7232d705d49e41087e10b274c42f09e259016a46f32c34e06dc4fd"},
{file = "onnxruntime-1.19.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c1dfe4f660a71b31caa81fc298a25f9612815215a47b286236e61d540350d7b6"},
{file = "onnxruntime-1.19.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a36511dc07c5c964b916697e42e366fa43c48cdb3d3503578d78cef30417cb84"},
{file = "onnxruntime-1.19.2-cp311-cp311-win32.whl", hash = "sha256:50cbb8dc69d6befad4746a69760e5b00cc3ff0a59c6c3fb27f8afa20e2cab7e7"},
{file = "onnxruntime-1.19.2-cp311-cp311-win_amd64.whl", hash = "sha256:1c3e5d415b78337fa0b1b75291e9ea9fb2a4c1f148eb5811e7212fed02cfffa8"},
{file = "onnxruntime-1.19.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:68e7051bef9cfefcbb858d2d2646536829894d72a4130c24019219442b1dd2ed"},
{file = "onnxruntime-1.19.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d2d366fbcc205ce68a8a3bde2185fd15c604d9645888703785b61ef174265168"},
{file = "onnxruntime-1.19.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:477b93df4db467e9cbf34051662a4b27c18e131fa1836e05974eae0d6e4cf29b"},
{file = "onnxruntime-1.19.2-cp312-cp312-win32.whl", hash = "sha256:9a174073dc5608fad05f7cf7f320b52e8035e73d80b0a23c80f840e5a97c0147"},
{file = "onnxruntime-1.19.2-cp312-cp312-win_amd64.whl", hash = "sha256:190103273ea4507638ffc31d66a980594b237874b65379e273125150eb044857"},
{file = "onnxruntime-1.19.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:636bc1d4cc051d40bc52e1f9da87fbb9c57d9d47164695dfb1c41646ea51ea66"},
{file = "onnxruntime-1.19.2-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5bd8b875757ea941cbcfe01582970cc299893d1b65bd56731e326a8333f638a3"},
{file = "onnxruntime-1.19.2-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b2046fc9560f97947bbc1acbe4c6d48585ef0f12742744307d3364b131ac5778"},
{file = "onnxruntime-1.19.2-cp38-cp38-win32.whl", hash = "sha256:31c12840b1cde4ac1f7d27d540c44e13e34f2345cf3642762d2a3333621abb6a"},
{file = "onnxruntime-1.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:016229660adea180e9a32ce218b95f8f84860a200f0f13b50070d7d90e92956c"},
{file = "onnxruntime-1.19.2-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:006c8d326835c017a9e9f74c9c77ebb570a71174a1e89fe078b29a557d9c3848"},
{file = "onnxruntime-1.19.2-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df2a94179a42d530b936f154615b54748239c2908ee44f0d722cb4df10670f68"},
{file = "onnxruntime-1.19.2-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fae4b4de45894b9ce7ae418c5484cbf0341db6813effec01bb2216091c52f7fb"},
{file = "onnxruntime-1.19.2-cp39-cp39-win32.whl", hash = "sha256:dc5430f473e8706fff837ae01323be9dcfddd3ea471c900a91fa7c9b807ec5d3"},
{file = "onnxruntime-1.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:38475e29a95c5f6c62c2c603d69fc7d4c6ccbf4df602bd567b86ae1138881c49"},
]
[package.dependencies]
coloredlogs = "*"
flatbuffers = "*"
numpy = ">=1.21.6"
packaging = "*"
protobuf = "*"
sympy = "*"
[[package]]
name = "opencv-python-headless"
version = "4.6.0.66"
@ -4483,20 +4398,6 @@ files = [
flake8 = "6.1.0"
tomli = {version = "*", markers = "python_version < \"3.11\""}
[[package]]
name = "pyreadline3"
version = "3.5.3"
description = "A python implementation of GNU readline."
optional = false
python-versions = ">=3.8"
files = [
{file = "pyreadline3-3.5.3-py3-none-any.whl", hash = "sha256:ddede153a92e5aad9c1fe63d692efd6a3e478f686adcd4938a051ffb63ec4f52"},
{file = "pyreadline3-3.5.3.tar.gz", hash = "sha256:9234684ca75a00a702fda42b17cc26ca665bc9d7c2da06af450468253099ff61"},
]
[package.extras]
dev = ["build", "flake8", "mypy", "pytest", "twine"]
[[package]]
name = "pytest"
version = "7.4.4"
@ -6613,11 +6514,6 @@ files = [
{file = "triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb"},
{file = "triton-3.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bcbf3b1c48af6a28011a5c40a5b3b9b5330530c3827716b5fbf6d7adcc1e53e9"},
{file = "triton-3.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6e5727202f7078c56f91ff13ad0c1abab14a0e7f2c87e91b12b6f64f3e8ae609"},
{file = "triton-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b052da883351fdf6be3d93cedae6db3b8e3988d3b09ed221bccecfa9612230"},
{file = "triton-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd34f19a8582af96e6291d4afce25dac08cb2a5d218c599163761e8e0827208e"},
{file = "triton-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d5e10de8c011adeb7c878c6ce0dd6073b14367749e34467f1cff2bde1b78253"},
{file = "triton-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8903767951bf86ec960b4fe4e21bc970055afc65e9d57e916d79ae3c93665e3"},
{file = "triton-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41004fb1ae9a53fcb3e970745feb87f0e3c94c6ce1ba86e95fa3b8537894bef7"},
]
[package.dependencies]
@ -7228,4 +7124,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "b469f14e5b97a95009b9d02c2762b36040e05c167dd4b0c2e0bcc1dcb7631507"
content-hash = "7c5fb235944009b74193d045f36c1be2a8e168393012bf952541e6e7dea08072"


@ -38,7 +38,7 @@ torchvision = [
python = "^3.10"
pydantic = "^2.0.0"
docling-core = "^1.6.2"
-docling-ibm-models = "^1.3.1"
+docling-ibm-models = "^2.0.0"
deepsearch-glm = "^0.22.0"
filetype = "^1.2.0"
pypdfium2 = "^4.30.0"


@ -1,9 +1,9 @@
<document>
<subtitle-level-1><location><page_1><loc_16><loc_85><loc_82><loc_87></location>TableFormer: Table Structure Understanding with Transformers.</subtitle-level-1>
<subtitle-level-1><location><page_1><loc_23><loc_78><loc_74><loc_82></location>Ahmed Nassar, Nikolaos Livathinos, Maksym Lysak, Peter Staar IBM Research</subtitle-level-1>
<paragraph><location><page_1><loc_23><loc_78><loc_74><loc_82></location>Ahmed Nassar, Nikolaos Livathinos, Maksym Lysak, Peter Staar IBM Research</paragraph>
<paragraph><location><page_1><loc_34><loc_77><loc_62><loc_78></location>{ ahn,nli,mly,taa } @zurich.ibm.com</paragraph>
<subtitle-level-1><location><page_1><loc_24><loc_71><loc_31><loc_73></location>Abstract</subtitle-level-1>
<subtitle-level-1><location><page_1><loc_52><loc_71><loc_67><loc_73></location>a. Picture of a table:</subtitle-level-1>
<subtitle-level-1><location><page_1><loc_52><loc_71><loc_67><loc_72></location>a. Picture of a table:</subtitle-level-1>
<subtitle-level-1><location><page_1><loc_8><loc_30><loc_21><loc_32></location>1. Introduction</subtitle-level-1>
<paragraph><location><page_1><loc_8><loc_10><loc_47><loc_29></location>The occurrence of tables in documents is ubiquitous. They often summarise quantitative or factual data, which is cumbersome to describe in verbose text but nevertheless extremely valuable. Unfortunately, this compact representation is often not easy to parse by machines. There are many implicit conventions used to obtain a compact table representation. For example, tables often have complex columnand row-headers in order to reduce duplicated cell content. Lines of different shapes and sizes are leveraged to separate content or indicate a tree structure. Additionally, tables can also have empty/missing table-entries or multi-row textual table-entries. Fig. 1 shows a table which presents all these issues.</paragraph>
<paragraph><location><page_1><loc_8><loc_35><loc_47><loc_70></location>Tables organize valuable content in a concise and compact representation. This content is extremely valuable for systems such as search engines, Knowledge Graph's, etc, since they enhance their predictive capabilities. Unfortunately, tables come in a large variety of shapes and sizes. Furthermore, they can have complex column/row-header configurations, multiline rows, different variety of separation lines, missing entries, etc. As such, the correct identification of the table-structure from an image is a nontrivial task. In this paper, we present a new table-structure identification model. The latter improves the latest end-toend deep learning model (i.e. encoder-dual-decoder from PubTabNet) in two significant ways. First, we introduce a new object detection decoder for table-cells. In this way, we can obtain the content of the table-cells from programmatic PDF's directly from the PDF source and avoid the training of the custom OCR decoders. This architectural change leads to more accurate table-content extraction and allows us to tackle non-english tables. Second, we replace the LSTM decoders with transformer based decoders. This upgrade improves significantly the previous state-of-the-art tree-editing-distance-score (TEDS) from 91% to 98.5% on simple tables and from 88.7% to 95% on complex tables.</paragraph>
@ -13,7 +13,7 @@
<row_0><col_0><body></col_0><col_1><col_header>3</col_1><col_2><col_header>1</col_2></row_0>
<row_1><col_0><row_header>2</col_0><col_1><body></col_1><col_2><body></col_2></row_1>
</table>
<paragraph><location><page_1><loc_52><loc_58><loc_79><loc_61></location>b. Red-annotation of bounding boxes, Blue-predictions by TableFormer</paragraph>
<paragraph><location><page_1><loc_52><loc_58><loc_80><loc_60></location>b. Red-annotation of bounding boxes, Blue-predictions by TableFormer</paragraph>
<figure>
<location><page_1><loc_51><loc_48><loc_88><loc_57></location>
</figure>
@ -22,11 +22,11 @@
<table>
<location><page_1><loc_52><loc_37><loc_88><loc_45></location>
<caption>Figure 1: Picture of a table with subtle, complex features such as (1) multi-column headers, (2) cell with multi-row text and (3) cells with no content. Image from PubTabNet evaluation set, filename: 'PMC2944238 004 02'.</caption>
<row_0><col_0><col_header>0</col_0><col_1><col_header>1</col_1><col_2><col_header>1</col_2><col_3><col_header>2 1</col_3><col_4><col_header>2 1</col_4><col_5><body></col_5></row_0>
<row_1><col_0><body>3</col_0><col_1><body>4</col_1><col_2><body>5 3</col_2><col_3><body>6</col_3><col_4><body>7</col_4><col_5><body></col_5></row_1>
<row_2><col_0><body>8</col_0><col_1><body>9</col_1><col_2><body>10</col_2><col_3><body>11</col_3><col_4><body>12</col_4><col_5><body>2</col_5></row_2>
<row_3><col_0><body></col_0><col_1><body>13</col_1><col_2><body>14</col_2><col_3><body>15</col_3><col_4><body>16</col_4><col_5><body>2</col_5></row_3>
<row_4><col_0><body></col_0><col_1><body>17</col_1><col_2><body>18</col_2><col_3><body>19</col_3><col_4><body>20</col_4><col_5><body>2</col_5></row_4>
<row_0><col_0><col_header>0</col_0><col_1><col_header>1</col_1><col_2><col_header>1</col_2><col_3><col_header>2 1</col_3><col_4><col_header>2 1</col_4><col_5><col_header>2 1</col_5></row_0>
<row_1><col_0><body>3</col_0><col_1><body>4</col_1><col_2><body>5 3</col_2><col_3><body>6</col_3><col_4><body></col_4><col_5><body>7</col_5></row_1>
<row_2><col_0><body>8</col_0><col_1><body>9</col_1><col_2><body>10</col_2><col_3><body>11</col_3><col_4><body>12</col_4><col_5><body>16</col_5></row_2>
<row_3><col_0><body>2</col_0><col_1><body>13</col_1><col_2><body>14</col_2><col_3><body>15</col_3><col_4><body></col_4><col_5><body>16</col_5></row_3>
<row_4><col_0><body></col_0><col_1><body>17</col_1><col_2><body>18</col_2><col_3><body>19</col_3><col_4><body>20</col_4><col_5><body>16</col_5></row_4>
</table>
<paragraph><location><page_1><loc_50><loc_16><loc_89><loc_26></location>Recently, significant progress has been made with vision based approaches to extract tables in documents. For the sake of completeness, the issue of table extraction from documents is typically decomposed into two separate challenges, i.e. (1) finding the location of the table(s) on a document-page and (2) finding the structure of a given table in the document.</paragraph>
<paragraph><location><page_1><loc_50><loc_10><loc_89><loc_16></location>The first problem is called table-location and has been previously addressed [30, 38, 19, 21, 23, 26, 8] with stateof-the-art object-detection networks (e.g. YOLO and later on Mask-RCNN [9]). For all practical purposes, it can be</paragraph>
@ -77,8 +77,8 @@
</table>
<paragraph><location><page_4><loc_50><loc_63><loc_89><loc_69></location>one adopts a colorful appearance with high contrast and the last one contains tables with sparse content. Lastly, we have combined all synthetic datasets into one big unified synthetic dataset of 600k examples.</paragraph>
<paragraph><location><page_4><loc_52><loc_61><loc_89><loc_62></location>Tab. 1 summarizes the various attributes of the datasets.</paragraph>
<subtitle-level-1><location><page_4><loc_50><loc_58><loc_73><loc_60></location>4. The TableFormer model</subtitle-level-1>
<paragraph><location><page_4><loc_50><loc_43><loc_89><loc_57></location>Given the image of a table, TableFormer is able to predict: 1) a sequence of tokens that represent the structure of a table, and 2) a bounding box coupled to a subset of those tokens. The conversion of an image into a sequence of tokens is a well-known task [35, 16]. While attention is often used as an implicit method to associate each token of the sequence with a position in the original image, an explicit association between the individual table-cells and the image bounding boxes is also required.</paragraph>
<subtitle-level-1><location><page_4><loc_50><loc_58><loc_73><loc_59></location>4. The TableFormer model</subtitle-level-1>
<paragraph><location><page_4><loc_50><loc_44><loc_89><loc_57></location>Given the image of a table, TableFormer is able to predict: 1) a sequence of tokens that represent the structure of a table, and 2) a bounding box coupled to a subset of those tokens. The conversion of an image into a sequence of tokens is a well-known task [35, 16]. While attention is often used as an implicit method to associate each token of the sequence with a position in the original image, an explicit association between the individual table-cells and the image bounding boxes is also required.</paragraph>
<subtitle-level-1><location><page_4><loc_50><loc_41><loc_69><loc_42></location>4.1. Model architecture.</subtitle-level-1>
<paragraph><location><page_4><loc_50><loc_16><loc_89><loc_40></location>We now describe in detail the proposed method, which is composed of three main components, see Fig. 4. Our CNN Backbone Network encodes the input as a feature vector of predefined length. The input feature vector of the encoded image is passed to the Structure Decoder to produce a sequence of HTML tags that represent the structure of the table. With each prediction of an HTML standard data cell (' < td > ') the hidden state of that cell is passed to the Cell BBox Decoder. As for spanning cells, such as row or column span, the tag is broken down to ' < ', 'rowspan=' or 'colspan=', with the number of spanning cells (attribute), and ' > '. The hidden state attached to ' < ' is passed to the Cell BBox Decoder. A shared feed forward network (FFN) receives the hidden states from the Structure Decoder, to provide the final detection predictions of the bounding box coordinates and their classification.</paragraph>
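
A toy illustration of the tag decomposition described in this paragraph (the tokenizer below is only a literal reading of the stated splitting rule, not TableFormer's actual vocabulary handling):

```python
def tokenize_cell_tag(tag: str) -> list[str]:
    # Plain data cells keep a single '<td>' token; spanning cells are split
    # into '<', the span keyword with '=', the span count, and '>', so that
    # the hidden state of '<' can be routed to the Cell BBox Decoder.
    if tag == "<td>":
        return ["<td>"]
    attr = tag[len("<td "):-1]            # e.g. 'rowspan="3"'
    key, _, value = attr.partition("=")
    return ["<", key + "=", value.strip('"'), ">"]


print(tokenize_cell_tag("<td>"))               # ['<td>']
print(tokenize_cell_tag('<td rowspan="3">'))   # ['<', 'rowspan=', '3', '>']
```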
<paragraph><location><page_4><loc_50><loc_10><loc_89><loc_16></location>CNN Backbone Network. A ResNet-18 CNN is the backbone that receives the table image and encodes it as a vector of predefined length. The network has been modified by removing the linear and pooling layer, as we are not per-</paragraph>
@ -89,18 +89,18 @@
</figure>
<caption><location><page_5><loc_8><loc_14><loc_47><loc_33></location>Figure 4: Given an input image of a table, the Encoder produces fixed-length features that represent the input image. The features are then passed to both the Structure Decoder and Cell BBox Decoder . During training, the Structure Decoder receives 'tokenized tags' of the HTML code that represent the table structure. Afterwards, a transformer encoder and decoder architecture is employed to produce features that are received by a linear layer, and the Cell BBox Decoder. The linear layer is applied to the features to predict the tags. Simultaneously, the Cell BBox Decoder selects features referring to the data cells (' < td > ', ' < ') and passes them through an attention network, an MLP, and a linear layer to predict the bounding boxes.</caption>
<figure>
<location><page_5><loc_9><loc_36><loc_47><loc_67></location>
<location><page_5><loc_9><loc_36><loc_47><loc_68></location>
<caption>Figure 4: Given an input image of a table, the Encoder produces fixed-length features that represent the input image. The features are then passed to both the Structure Decoder and Cell BBox Decoder . During training, the Structure Decoder receives 'tokenized tags' of the HTML code that represent the table structure. Afterwards, a transformer encoder and decoder architecture is employed to produce features that are received by a linear layer, and the Cell BBox Decoder. The linear layer is applied to the features to predict the tags. Simultaneously, the Cell BBox Decoder selects features referring to the data cells (' < td > ', ' < ') and passes them through an attention network, an MLP, and a linear layer to predict the bounding boxes.</caption>
</figure>
<paragraph><location><page_5><loc_50><loc_63><loc_89><loc_69></location>forming classification, and adding an adaptive pooling layer of size 28*28. ResNet by default downsamples the image resolution by 32 and then the encoded image is provided to both the Structure Decoder , and Cell BBox Decoder .</paragraph>
<paragraph><location><page_5><loc_50><loc_48><loc_89><loc_63></location>Structure Decoder. The transformer architecture of this component is based on the work proposed in [31]. After extensive experimentation, the Structure Decoder is modeled as a transformer encoder with two encoder layers and a transformer decoder made from a stack of 4 decoder layers that comprise mainly of multi-head attention and feed forward layers. This configuration uses fewer layers and heads in comparison to networks applied to other problems (e.g. "Scene Understanding", "Image Captioning"), something which we relate to the simplicity of table images.</paragraph>
<paragraph><location><page_5><loc_50><loc_48><loc_89><loc_62></location>Structure Decoder. The transformer architecture of this component is based on the work proposed in [31]. After extensive experimentation, the Structure Decoder is modeled as a transformer encoder with two encoder layers and a transformer decoder made from a stack of 4 decoder layers that comprise mainly of multi-head attention and feed forward layers. This configuration uses fewer layers and heads in comparison to networks applied to other problems (e.g. "Scene Understanding", "Image Captioning"), something which we relate to the simplicity of table images.</paragraph>
<paragraph><location><page_5><loc_50><loc_31><loc_89><loc_47></location>The transformer encoder receives an encoded image from the CNN Backbone Network and refines it through a multi-head dot-product attention layer, followed by a Feed Forward Network. During training, the transformer decoder receives as input the output feature produced by the transformer encoder, and the tokenized input of the HTML ground-truth tags. Using a stack of multi-head attention layers, different aspects of the tag sequence could be inferred. This is achieved by each attention head on a layer operating in a different subspace, and then combining altogether their attention score.</paragraph>
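
The stated configuration maps directly onto stock PyTorch modules; a sketch using the layer counts from the text (model width and head count are placeholders):

```python
import torch.nn as nn

d_model, n_heads = 256, 4  # assumed; the text only fixes the layer counts
structure_encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True),
    num_layers=2,  # two encoder layers, per the text
)
structure_decoder = nn.TransformerDecoder(
    nn.TransformerDecoderLayer(d_model, n_heads, batch_first=True),
    num_layers=4,  # stack of four decoder layers, per the text
)
```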
<paragraph><location><page_5><loc_50><loc_17><loc_89><loc_31></location>Cell BBox Decoder. Our architecture allows to simultaneously predict HTML tags and bounding boxes for each table cell without the need of a separate object detector end to end. This approach is inspired by DETR [1] which employs a Transformer Encoder, and Decoder that looks for a specific number of object queries (potential object detections). As our model utilizes a transformer architecture, the hidden state of the < td > ' and ' < ' HTML structure tags become the object query.</paragraph>
<paragraph><location><page_5><loc_50><loc_10><loc_89><loc_17></location>The encoding generated by the CNN Backbone Network along with the features acquired for every data cell from the Transformer Decoder are then passed to the attention network. The attention network takes both inputs and learns to provide an attention weighted encoding. This weighted at-</paragraph>
<paragraph><location><page_6><loc_8><loc_80><loc_47><loc_91></location>tention encoding is then multiplied to the encoded image to produce a feature for each table cell. Notice that this is different than the typical object detection problem where imbalances between the number of detections and the amount of objects may exist. In our case, we know up front that the produced detections always match with the table cells in number and correspondence.</paragraph>
<paragraph><location><page_6><loc_8><loc_70><loc_47><loc_80></location>The output features for each table cell are then fed into the feed-forward network (FFN). The FFN consists of a Multi-Layer Perceptron (3 layers with ReLU activation function) that predicts the normalized coordinates for the bounding box of each table cell. Finally, the predicted bounding boxes are classified based on whether they are empty or not using a linear layer.</paragraph>
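
A minimal PyTorch sketch of this head: the 3-layer ReLU MLP and the linear empty/non-empty classifier come from the text, while the feature width and the sigmoid squashing to normalized coordinates are assumptions:

```python
import torch
import torch.nn as nn


class CellBBoxHead(nn.Module):
    def __init__(self, hidden_dim: int = 256):
        super().__init__()
        # 3-layer MLP with ReLU predicting normalized (cx, cy, w, h).
        self.mlp = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, 4),
        )
        # Linear layer classifying each predicted box as empty / non-empty.
        self.classifier = nn.Linear(hidden_dim, 2)

    def forward(self, cell_features: torch.Tensor):
        boxes = self.mlp(cell_features).sigmoid()
        empty_logits = self.classifier(cell_features)
        return boxes, empty_logits


head = CellBBoxHead()
feats = torch.randn(12, 256)  # one feature vector per '<td>' / '<' token
boxes, empty_logits = head(feats)
```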
<paragraph><location><page_6><loc_8><loc_44><loc_47><loc_69></location>Loss Functions. We formulate a multi-task loss Eq. 2 to train our network. The Cross-Entropy loss (denoted as l$_{s}$ ) is used to train the Structure Decoder which predicts the structure tokens. As for the Cell BBox Decoder it is trained with a combination of losses denoted as l$_{box}$ . l$_{box}$ consists of the generally used l$_{1}$ loss for object detection and the IoU loss ( l$_{iou}$ ) to be scale invariant as explained in [25]. In comparison to DETR, we do not use the Hungarian algorithm [15] to match the predicted bounding boxes with the ground-truth boxes, as we have already achieved a one-toone match through two steps: 1) Our token input sequence is naturally ordered, therefore the hidden states of the table data cells are also in order when they are provided as input to the Cell BBox Decoder , and 2) Our bounding boxes generation mechanism (see Sec. 3) ensures a one-to-one mapping between the cell content and its bounding box for all post-processed datasets.</paragraph>
<paragraph><location><page_6><loc_8><loc_41><loc_47><loc_44></location>The loss used to train the TableFormer can be defined as following:</paragraph>
<paragraph><location><page_6><loc_8><loc_41><loc_47><loc_43></location>The loss used to train the TableFormer can be defined as following:</paragraph>
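
The equations themselves were dropped during conversion (formulas are excluded from the layout labels, as the LayoutModel diff above shows); for reference, the paper's box loss (Eq. 1) and multi-task loss (Eq. 2) read:

```latex
\ell_{box} = \lambda_{iou}\,\ell_{iou} + \lambda_{l_{1}}\,\ell_{1},
\qquad
\ell = \lambda\,\ell_{s} + (1 - \lambda)\,\ell_{box}
```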
<paragraph><location><page_6><loc_8><loc_32><loc_46><loc_33></location>where λ ∈ [0, 1], and λ$_{iou}$, λ$_{l}$$_{1}$ ∈$_{R}$ are hyper-parameters.</paragraph>
<subtitle-level-1><location><page_6><loc_8><loc_28><loc_28><loc_30></location>5. Experimental Results</subtitle-level-1>
<subtitle-level-1><location><page_6><loc_8><loc_26><loc_29><loc_27></location>5.1. Implementation Details</subtitle-level-1>
@ -118,10 +118,10 @@
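
The TEDS definition this hunk refers to was also dropped as a formula during conversion; as introduced with PubTabNet, it is:

```latex
\mathrm{TEDS}(T_{a}, T_{b}) = 1 - \frac{\mathrm{EditDist}(T_{a}, T_{b})}{\max\left(\lvert T_{a}\rvert, \lvert T_{b}\rvert\right)}
```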
<paragraph><location><page_7><loc_8><loc_73><loc_47><loc_77></location>where T$_{a}$ and T$_{b}$ represent tables in tree structure HTML format. EditDist denotes the tree-edit distance, and | T | represents the number of nodes in T .</paragraph>
<subtitle-level-1><location><page_7><loc_8><loc_70><loc_28><loc_72></location>5.4. Quantitative Analysis</subtitle-level-1>
<paragraph><location><page_7><loc_8><loc_50><loc_47><loc_69></location>Structure. As shown in Tab. 2, TableFormer outperforms all SOTA methods across different datasets by a large margin for predicting the table structure from an image. All the more, our model outperforms pre-trained methods. During the evaluation we do not apply any table filtering. We also provide our baseline results on the SynthTabNet dataset. It has been observed that large tables (e.g. tables that occupy half of the page or more) yield poor predictions. We attribute this issue to the image resizing during the preprocessing step, that produces downsampled images with indistinguishable features. This problem can be addressed by treating such big tables with a separate model which accepts a large input image size.</paragraph>
<caption><location><page_7><loc_8><loc_22><loc_47><loc_25></location>Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN).</caption>
<caption><location><page_7><loc_8><loc_21><loc_47><loc_25></location>Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN). FT: Model was trained on PubTabNet then finetuned.</caption>
<table>
<location><page_7><loc_9><loc_27><loc_46><loc_48></location>
<caption>Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN).</caption>
<caption>Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN). FT: Model was trained on PubTabNet then finetuned.</caption>
<row_0><col_0><col_header>Model</col_0><col_1><col_header>Dataset</col_1><col_2><col_header>Simple</col_2><col_3><col_header>TEDS Complex</col_3><col_4><col_header>All</col_4></row_0>
<row_1><col_0><row_header>EDD</col_0><col_1><body>PTN</col_1><col_2><body>91.1</col_2><col_3><body>88.7</col_3><col_4><body>89.9</col_4></row_1>
<row_2><col_0><row_header>GTE</col_0><col_1><body>PTN</col_1><col_2><body>-</col_2><col_3><body>-</col_3><col_4><body>93.01</col_4></row_2>
@ -134,7 +134,6 @@
<row_9><col_0><row_header>TableFormer</col_0><col_1><body>TB</col_1><col_2><body>89.6</col_2><col_3><body>-</col_3><col_4><body>89.6</col_4></row_9>
<row_10><col_0><row_header>TableFormer</col_0><col_1><body>STN</col_1><col_2><body>96.9</col_2><col_3><body>95.7</col_3><col_4><body>96.7</col_4></row_10>
</table>
<paragraph><location><page_7><loc_8><loc_21><loc_43><loc_22></location>FT: Model was trained on PubTabNet then finetuned.</paragraph>
<paragraph><location><page_7><loc_8><loc_10><loc_47><loc_19></location>Cell Detection. Like any object detector, our Cell BBox Detector provides bounding boxes that can be improved with post-processing during inference. We make use of the grid-like structure of tables to refine the predictions. A detailed explanation on the post-processing is available in the supplementary material. As shown in Tab. 3, we evaluate</paragraph>
<paragraph><location><page_7><loc_50><loc_71><loc_89><loc_91></location>our Cell BBox Decoder accuracy for cells with a class label of 'content' only using the PASCAL VOC mAP metric for pre-processing and post-processing. Note that we do not have post-processing results for SynthTabNet as images are only provided. To compare the performance of our proposed approach, we've integrated TableFormer's Cell BBox Decoder into EDD architecture. As mentioned previously, the Structure Decoder provides the Cell BBox Decoder with the features needed to predict the bounding box predictions. Therefore, the accuracy of the Structure Decoder directly influences the accuracy of the Cell BBox Decoder . If the Structure Decoder predicts an extra column, this will result in an extra column of predicted bounding boxes.</paragraph>
<caption><location><page_7><loc_50><loc_57><loc_89><loc_60></location>Table 3: Cell Bounding Box detection results on PubTabNet, and FinTabNet. PP: Post-processing.</caption>
@ -160,63 +159,27 @@
<row_6><col_0><row_header>TableFormer</col_0><col_1><body>95.4</col_1><col_2><body>90.1</col_2><col_3><body>93.6</col_3></row_6>
</table>
<paragraph><location><page_8><loc_9><loc_89><loc_82><loc_90></location>a. Red - PDF cells, Green - predicted bounding boxes, Blue - post-processed predictions matched to PDF cells</paragraph>
<paragraph><location><page_8><loc_9><loc_87><loc_46><loc_88></location>Japanese language (previously unseen by TableFormer):</paragraph>
<paragraph><location><page_8><loc_50><loc_87><loc_70><loc_88></location>Example table from FinTabNet:</paragraph>
<figure>
<location><page_8><loc_8><loc_76><loc_49><loc_87></location>
</figure>
<figure>
<location><page_8><loc_50><loc_77><loc_91><loc_87></location>
</figure>
<paragraph><location><page_8><loc_9><loc_73><loc_63><loc_74></location>b. Structure predicted by TableFormer, with superimposed matched PDF cell text:</paragraph>
<table>
<location><page_8><loc_9><loc_63><loc_49><loc_72></location>
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>論文ファイル</col_2><col_3><col_header>論文ファイル</col_3><col_4><col_header>参考文献</col_4><col_5><col_header>参考文献</col_5></row_0>
<row_1><col_0><body>出典</col_0><col_1><col_header>ファイル 数</col_1><col_2><col_header>英語</col_2><col_3><col_header>日本語</col_3><col_4><col_header>英語</col_4><col_5><col_header>日本語</col_5></row_1>
<row_2><col_0><row_header>Association for Computational Linguistics(ACL2003)</col_0><col_1><body>65</col_1><col_2><body>65</col_2><col_3><body>0</col_3><col_4><body>150</col_4><col_5><body>0</col_5></row_2>
<row_3><col_0><row_header>Computational Linguistics(COLING2002)</col_0><col_1><body>140</col_1><col_2><body>140</col_2><col_3><body>0</col_3><col_4><body>150</col_4><col_5><body>0</col_5></row_3>
<row_4><col_0><row_header>電気情報通信学会 2003 年総合大会</col_0><col_1><body>150</col_1><col_2><body>8</col_2><col_3><body>142</col_3><col_4><body>223</col_4><col_5><body>147</col_5></row_4>
<row_5><col_0><row_header>情報処理学会第 65 回全国大会 (2003)</col_0><col_1><body>177</col_1><col_2><body>1</col_2><col_3><body>176</col_3><col_4><body>150</col_4><col_5><body>236</col_5></row_5>
<row_6><col_0><row_header>第 17 回人工知能学会全国大会 (2003)</col_0><col_1><body>208</col_1><col_2><body>5</col_2><col_3><body>203</col_3><col_4><body>152</col_4><col_5><body>244</col_5></row_6>
<row_7><col_0><row_header>自然言語処理研究会第 146 〜 155 回</col_0><col_1><body>98</col_1><col_2><body>2</col_2><col_3><body>96</col_3><col_4><body>150</col_4><col_5><body>232</col_5></row_7>
<row_8><col_0><row_header>WWW から収集した論文</col_0><col_1><body>107</col_1><col_2><body>73</col_2><col_3><body>34</col_3><col_4><body>147</col_4><col_5><body>96</col_5></row_8>
<row_9><col_0><body></col_0><col_1><body>945</col_1><col_2><body>294</col_2><col_3><body>651</col_3><col_4><body>1122</col_4><col_5><body>955</col_5></row_9>
</table>
<caption><location><page_8><loc_62><loc_62><loc_90><loc_63></location>Text is aligned to match original for ease of viewing</caption>
<table>
<location><page_8><loc_50><loc_64><loc_90><loc_72></location>
<figure>
<location><page_8><loc_8><loc_63><loc_90><loc_88></location>
<caption>Text is aligned to match original for ease of viewing</caption>
<row_0><col_0><body></col_0><col_1><col_header>Shares (in millions)</col_1><col_2><col_header>Shares (in millions)</col_2><col_3><col_header>Weighted Average Grant Date Fair Value</col_3><col_4><col_header>Weighted Average Grant Date Fair Value</col_4></row_0>
<row_1><col_0><body></col_0><col_1><col_header>RS U s</col_1><col_2><col_header>PSUs</col_2><col_3><col_header>RSUs</col_3><col_4><col_header>PSUs</col_4></row_1>
<row_2><col_0><row_header>Nonvested on Janua ry 1</col_0><col_1><body>1. 1</col_1><col_2><body>0.3</col_2><col_3><body>90.10 $</col_3><col_4><body>$ 91.19</col_4></row_2>
<row_3><col_0><row_header>Granted</col_0><col_1><body>0. 5</col_1><col_2><body>0.1</col_2><col_3><body>117.44</col_3><col_4><body>122.41</col_4></row_3>
<row_4><col_0><row_header>Vested</col_0><col_1><body>(0. 5 )</col_1><col_2><body>(0.1)</col_2><col_3><body>87.08</col_3><col_4><body>81.14</col_4></row_4>
<row_5><col_0><row_header>Canceled or forfeited</col_0><col_1><body>(0. 1 )</col_1><col_2><body>-</col_2><col_3><body>102.01</col_3><col_4><body>92.18</col_4></row_5>
<row_6><col_0><row_header>Nonvested on December 31</col_0><col_1><body>1.0</col_1><col_2><body>0.3</col_2><col_3><body>104.85 $</col_3><col_4><body>$ 104.51</col_4></row_6>
</table>
<caption><location><page_8><loc_8><loc_54><loc_89><loc_60></location>Figure 5: One of the benefits of TableFormer is that it is language agnostic, as an example, the left part of the illustration demonstrates TableFormer predictions on previously unseen language (Japanese). Additionally, we see that TableFormer is robust to variability in style and content, right side of the illustration shows the example of the TableFormer prediction from the FinTabNet dataset.</caption>
<figure>
<location><page_8><loc_8><loc_44><loc_35><loc_52></location>
<caption>Figure 5: One of the benefits of TableFormer is that it is language agnostic, as an example, the left part of the illustration demonstrates TableFormer predictions on previously unseen language (Japanese). Additionally, we see that TableFormer is robust to variability in style and content, right side of the illustration shows the example of the TableFormer prediction from the FinTabNet dataset.</caption>
</figure>
<figure>
<location><page_8><loc_62><loc_44><loc_89><loc_52></location>
</figure>
<paragraph><location><page_8><loc_8><loc_54><loc_89><loc_60></location>Figure 5: One of the benefits of TableFormer is that it is language agnostic, as an example, the left part of the illustration demonstrates TableFormer predictions on previously unseen language (Japanese). Additionally, we see that TableFormer is robust to variability in style and content, right side of the illustration shows the example of the TableFormer prediction from the FinTabNet dataset.</paragraph>
<caption><location><page_8><loc_10><loc_41><loc_87><loc_42></location>Figure 6: An example of TableFormer predictions (bounding boxes and structure) from generated SynthTabNet table.</caption>
<figure>
<location><page_8><loc_35><loc_44><loc_61><loc_52></location>
<location><page_8><loc_8><loc_44><loc_89><loc_52></location>
<caption>Figure 6: An example of TableFormer predictions (bounding boxes and structure) from generated SynthTabNet table.</caption>
</figure>
<subtitle-level-1><location><page_8><loc_8><loc_37><loc_27><loc_38></location>5.5. Qualitative Analysis</subtitle-level-1>
<paragraph><location><page_8><loc_8><loc_10><loc_47><loc_32></location>We showcase several visualizations for the different components of our network on various "complex" tables within datasets presented in this work in Fig. 5 and Fig. 6 As it is shown, our model is able to predict bounding boxes for all table cells, even for the empty ones. Additionally, our post-processing techniques can extract the cell content by matching the predicted bounding boxes to the PDF cells based on their overlap and spatial proximity. The left part of Fig. 5 demonstrates also the adaptability of our method to any language, as it can successfully extract Japanese text, although the training set contains only English content. We provide more visualizations including the intermediate steps in the supplementary material. Overall these illustrations justify the versatility of our method across a diverse range of table appearances and content type.</paragraph>
<subtitle-level-1><location><page_8><loc_50><loc_37><loc_75><loc_38></location>6. Future Work & Conclusion</subtitle-level-1>
<paragraph><location><page_8><loc_50><loc_18><loc_89><loc_35></location>In this paper, we presented TableFormer an end-to-end transformer based approach to predict table structures and bounding boxes of cells from an image. This approach enables us to recreate the table structure, and extract the cell content from PDF or OCR by using bounding boxes. Additionally, it provides the versatility required in real-world scenarios when dealing with various types of PDF documents, and languages. Furthermore, our method outperforms all state-of-the-arts with a wide margin. Finally, we introduce "SynthTabNet" a challenging synthetically generated dataset that reinforces missing characteristics from other datasets.</paragraph>
<paragraph><location><page_8><loc_50><loc_17><loc_89><loc_35></location>In this paper, we presented TableFormer an end-to-end transformer based approach to predict table structures and bounding boxes of cells from an image. This approach enables us to recreate the table structure, and extract the cell content from PDF or OCR by using bounding boxes. Additionally, it provides the versatility required in real-world scenarios when dealing with various types of PDF documents, and languages. Furthermore, our method outperforms all state-of-the-arts with a wide margin. Finally, we introduce "SynthTabNet" a challenging synthetically generated dataset that reinforces missing characteristics from other datasets.</paragraph>
<subtitle-level-1><location><page_8><loc_50><loc_14><loc_60><loc_15></location>References</subtitle-level-1>
<paragraph><location><page_8><loc_51><loc_10><loc_89><loc_12></location>[1] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-</paragraph>
<paragraph><location><page_9><loc_11><loc_85><loc_47><loc_91></location>end object detection with transformers. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020 , pages 213-229, Cham, 2020. Springer International Publishing. 5</paragraph>
<paragraph><location><page_9><loc_11><loc_85><loc_47><loc_90></location>end object detection with transformers. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020 , pages 213-229, Cham, 2020. Springer International Publishing. 5</paragraph>
<paragraph><location><page_9><loc_9><loc_81><loc_47><loc_85></location>[2] Zewen Chi, Heyan Huang, Heng-Da Xu, Houjin Yu, Wanxuan Yin, and Xian-Ling Mao. Complicated table structure recognition. arXiv preprint arXiv:1908.04729 , 2019. 3</paragraph>
<paragraph><location><page_9><loc_9><loc_77><loc_47><loc_81></location>[3] Bertrand Couasnon and Aurelie Lemaitre. Recognition of Tables and Forms , pages 647-677. Springer London, London, 2014. 2</paragraph>
<paragraph><location><page_9><loc_9><loc_71><loc_47><loc_77></location>[4] Herv'e D'ejean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), Apr. 2019. http://sac.founderit.com/. 2</paragraph>
<paragraph><location><page_9><loc_9><loc_71><loc_47><loc_76></location>[4] Herv'e D'ejean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), Apr. 2019. http://sac.founderit.com/. 2</paragraph>
<paragraph><location><page_9><loc_9><loc_66><loc_47><loc_71></location>[5] Basilios Gatos, Dimitrios Danatsas, Ioannis Pratikakis, and Stavros J Perantonis. Automatic table detection in document images. In International Conference on Pattern Recognition and Image Analysis , pages 609-618. Springer, 2005. 2</paragraph>
<paragraph><location><page_9><loc_9><loc_60><loc_47><loc_65></location>[6] Max Gobel, Tamir Hassan, Ermelinda Oro, and Giorgio Orsi. Icdar 2013 table competition. In 2013 12th International Conference on Document Analysis and Recognition , pages 1449-1453, 2013. 2</paragraph>
<paragraph><location><page_9><loc_9><loc_56><loc_47><loc_60></location>[7] EA Green and M Krishnamoorthy. Recognition of tables using table grammars. procs. In Symposium on Document Analysis and Recognition (SDAIR'95) , pages 261-277. 2</paragraph>
@ -228,22 +191,22 @@
<paragraph><location><page_9><loc_8><loc_18><loc_47><loc_25></location>[13] Thotreingam Kasar, Philippine Barlas, Sebastien Adam, Cl'ement Chatelain, and Thierry Paquet. Learning to detect tables in scanned document images using line information. In 2013 12th International Conference on Document Analysis and Recognition , pages 1185-1189. IEEE, 2013. 2</paragraph>
<paragraph><location><page_9><loc_8><loc_14><loc_47><loc_18></location>[14] Pratik Kayal, Mrinal Anand, Harsh Desai, and Mayank Singh. Icdar 2021 competition on scientific table image recognition to latex, 2021. 2</paragraph>
<paragraph><location><page_9><loc_8><loc_10><loc_47><loc_14></location>[15] Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly , 2(1-2):83-97, 1955. 6</paragraph>
<paragraph><location><page_9><loc_50><loc_82><loc_89><loc_91></location>[16] Girish Kulkarni, Visruth Premraj, Vicente Ordonez, Sagnik Dhar, Siming Li, Yejin Choi, Alexander C. Berg, and Tamara L. Berg. Babytalk: Understanding and generating simple image descriptions. IEEE Transactions on Pattern Analysis and Machine Intelligence , 35(12):2891-2903, 2013. 4</paragraph>
<paragraph><location><page_9><loc_50><loc_82><loc_89><loc_90></location>[16] Girish Kulkarni, Visruth Premraj, Vicente Ordonez, Sagnik Dhar, Siming Li, Yejin Choi, Alexander C. Berg, and Tamara L. Berg. Babytalk: Understanding and generating simple image descriptions. IEEE Transactions on Pattern Analysis and Machine Intelligence , 35(12):2891-2903, 2013. 4</paragraph>
<paragraph><location><page_9><loc_50><loc_78><loc_89><loc_82></location>[17] Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou, and Zhoujun Li. Tablebank: A benchmark dataset for table detection and recognition, 2019. 2, 3</paragraph>
<paragraph><location><page_9><loc_50><loc_67><loc_89><loc_78></location>[18] Yiren Li, Zheng Huang, Junchi Yan, Yi Zhou, Fan Ye, and Xianhui Liu. Gfte: Graph-based financial table extraction. In Alberto Del Bimbo, Rita Cucchiara, Stan Sclaroff, Giovanni Maria Farinella, Tao Mei, Marco Bertini, Hugo Jair Escalante, and Roberto Vezzani, editors, Pattern Recognition. ICPR International Workshops and Challenges , pages 644-658, Cham, 2021. Springer International Publishing. 2, 3</paragraph>
<paragraph><location><page_9><loc_50><loc_59><loc_89><loc_67></location>[19] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter Staar. Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence , 35(17):15137-15145, May 2021. 1</paragraph>
<paragraph><location><page_9><loc_50><loc_53><loc_89><loc_58></location>[20] Rujiao Long, Wen Wang, Nan Xue, Feiyu Gao, Zhibo Yang, Yongpan Wang, and Gui-Song Xia. Parsing table structures in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision , pages 944-952, 2021. 2</paragraph>
<paragraph><location><page_9><loc_50><loc_45><loc_89><loc_53></location>[21] Shubham Singh Paliwal, D Vishwanath, Rohit Rahul, Monika Sharma, and Lovekesh Vig. Tablenet: Deep learning model for end-to-end table detection and tabular data extraction from scanned document images. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 128-133. IEEE, 2019. 1</paragraph>
<paragraph><location><page_9><loc_50><loc_30><loc_89><loc_45></location>[22] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch'e-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32 , pages 8024-8035. Curran Associates, Inc., 2019. 6</paragraph>
<paragraph><location><page_9><loc_50><loc_30><loc_89><loc_44></location>[22] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alch'e-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32 , pages 8024-8035. Curran Associates, Inc., 2019. 6</paragraph>
<paragraph><location><page_9><loc_50><loc_21><loc_89><loc_29></location>[23] Devashish Prasad, Ayan Gadpal, Kshitij Kapadni, Manish Visave, and Kavita Sultanpure. Cascadetabnet: An approach for end to end table detection and structure recognition from image-based documents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops , pages 572-573, 2020. 1</paragraph>
<paragraph><location><page_9><loc_50><loc_16><loc_89><loc_21></location>[24] Shah Rukh Qasim, Hassan Mahmood, and Faisal Shafait. Rethinking table recognition using graph neural networks. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 142-147. IEEE, 2019. 3</paragraph>
<paragraph><location><page_9><loc_50><loc_10><loc_89><loc_15></location>[25] Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, Amir Sadeghian, Ian Reid, and Silvio Savarese. Generalized intersection over union: A metric and a loss for bounding box regression. In Proceedings of the IEEE/CVF Conference on</paragraph>
<paragraph><location><page_10><loc_11><loc_88><loc_47><loc_91></location>Computer Vision and Pattern Recognition , pages 658-666, 2019. 6</paragraph>
<paragraph><location><page_10><loc_11><loc_88><loc_47><loc_90></location>Computer Vision and Pattern Recognition , pages 658-666, 2019. 6</paragraph>
<paragraph><location><page_10><loc_8><loc_80><loc_47><loc_88></location>[26] Sebastian Schreiber, Stefan Agne, Ivo Wolf, Andreas Dengel, and Sheraz Ahmed. Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR) , volume 01, pages 11621167, 2017. 1</paragraph>
<paragraph><location><page_10><loc_8><loc_71><loc_47><loc_79></location>[27] Sebastian Schreiber, Stefan Agne, Ivo Wolf, Andreas Dengel, and Sheraz Ahmed. Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In 2017 14th IAPR international conference on document analysis and recognition (ICDAR) , volume 1, pages 1162-1167. IEEE, 2017. 3</paragraph>
<paragraph><location><page_10><loc_8><loc_66><loc_47><loc_71></location>[28] Faisal Shafait and Ray Smith. Table detection in heterogeneous documents. In Proceedings of the 9th IAPR International Workshop on Document Analysis Systems , pages 6572, 2010. 2</paragraph>
<paragraph><location><page_10><loc_8><loc_59><loc_47><loc_65></location>[29] Shoaib Ahmed Siddiqui, Imran Ali Fateh, Syed Tahseen Raza Rizvi, Andreas Dengel, and Sheraz Ahmed. Deeptabstr: Deep learning based table structure recognition. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1403-1409. IEEE, 2019. 3</paragraph>
<paragraph><location><page_10><loc_8><loc_52><loc_47><loc_59></location>[30] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD , KDD '18, pages 774-782, New York, NY, USA, 2018. ACM. 1</paragraph>
<paragraph><location><page_10><loc_8><loc_52><loc_47><loc_58></location>[30] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD , KDD '18, pages 774-782, New York, NY, USA, 2018. ACM. 1</paragraph>
<paragraph><location><page_10><loc_8><loc_42><loc_47><loc_51></location>[31] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Ł ukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30 , pages 5998-6008. Curran Associates, Inc., 2017. 5</paragraph>
<paragraph><location><page_10><loc_8><loc_37><loc_47><loc_42></location>[32] Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. Show and tell: A neural image caption generator. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) , June 2015. 2</paragraph>
<paragraph><location><page_10><loc_8><loc_31><loc_47><loc_36></location>[33] Wenyuan Xue, Qingyong Li, and Dacheng Tao. Res2tim: reconstruct syntactic structures from table images. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 749-755. IEEE, 2019. 3</paragraph>
@ -251,15 +214,14 @@
<paragraph><location><page_10><loc_8><loc_20><loc_47><loc_25></location>[35] Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. Image captioning with semantic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition , pages 4651-4659, 2016. 4</paragraph>
<paragraph><location><page_10><loc_8><loc_13><loc_47><loc_19></location>[36] Xinyi Zheng, Doug Burdick, Lucian Popa, Peter Zhong, and Nancy Xin Ru Wang. Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. Winter Conference for Applications in Computer Vision (WACV) , 2021. 2, 3</paragraph>
<paragraph><location><page_10><loc_8><loc_10><loc_47><loc_12></location>[37] Xu Zhong, Elaheh ShafieiBavani, and Antonio Jimeno Yepes. Image-based table recognition: Data, model,</paragraph>
<paragraph><location><page_10><loc_53><loc_85><loc_89><loc_91></location>and evaluation. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision ECCV 2020 , pages 564-580, Cham, 2020. Springer International Publishing. 2, 3, 7</paragraph>
<paragraph><location><page_10><loc_54><loc_85><loc_89><loc_90></location>and evaluation. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision ECCV 2020 , pages 564-580, Cham, 2020. Springer International Publishing. 2, 3, 7</paragraph>
<paragraph><location><page_10><loc_50><loc_80><loc_89><loc_85></location>[38] Xu Zhong, Jianbin Tang, and Antonio Jimeno Yepes. Publaynet: Largest dataset ever for document layout analysis. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1015-1022, 2019. 1</paragraph>
<subtitle-level-1><location><page_11><loc_22><loc_85><loc_76><loc_86></location>TableFormer: Table Structure Understanding with Transformers</subtitle-level-1>
<paragraph><location><page_11><loc_36><loc_83><loc_61><loc_85></location>Supplementary Material</paragraph>
<subtitle-level-1><location><page_11><loc_22><loc_83><loc_76><loc_86></location>TableFormer: Table Structure Understanding with Transformers Supplementary Material</subtitle-level-1>
<subtitle-level-1><location><page_11><loc_8><loc_78><loc_29><loc_80></location>1. Details on the datasets</subtitle-level-1>
<subtitle-level-1><location><page_11><loc_8><loc_76><loc_25><loc_77></location>1.1. Data preparation</subtitle-level-1>
<paragraph><location><page_11><loc_8><loc_51><loc_47><loc_75></location>As a first step of our data preparation process, we have calculated statistics over the datasets across the following dimensions: (1) table size measured in the number of rows and columns, (2) complexity of the table, (3) strictness of the provided HTML structure and (4) completeness (i.e. no omitted bounding boxes). A table is considered to be simple if it does not contain row spans or column spans. Additionally, a table has a strict HTML structure if every row has the same number of columns after taking into account any row or column spans. Therefore, a strict HTML structure always looks rectangular. However, HTML is a lenient encoding format, i.e. tables with rows of different sizes might still be regarded as correct due to implicit display rules. These implicit rules leave room for ambiguity, which we want to avoid. As such, we prefer to have "strict" tables, i.e. tables where every row has exactly the same length.</paragraph>
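For illustration, a minimal Python sketch of this strictness check (an assumption about the data shape, not code from this repository); each cell is given as a (colspan, rowspan) pair:

```python
# Minimal sketch of the "strict HTML structure" check described above:
# a table is strict if, after expanding row and column spans, every row
# covers the same number of grid columns. Cells are (colspan, rowspan) pairs.
def is_strict(rows):
    carry = []   # (colspan, rows_remaining) for cells spanning down from above
    widths = []
    for row in rows:
        width = sum(colspan for colspan, _ in carry)
        new_carry = [(cs, r - 1) for cs, r in carry if r > 1]
        for colspan, rowspan in row:
            width += colspan
            if rowspan > 1:
                new_carry.append((colspan, rowspan - 1))
        carry = new_carry
        widths.append(width)
    return len(set(widths)) <= 1

# A 3x3 grid with one cell spanning two rows is still strict:
assert is_strict([[(1, 1), (2, 1)], [(1, 2), (2, 1)], [(2, 1)]])
```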
<paragraph><location><page_11><loc_8><loc_21><loc_47><loc_51></location>We have developed a technique that tries to derive a missing bounding box out of its neighbors. As a first step, we use the annotation data to generate the most fine-grained grid that covers the table structure. In case of strict HTML tables, all grid squares are associated with some table cell and in the presence of table spans a cell extends across multiple grid squares. When enough bounding boxes are known for a rectangular table, it is possible to compute the geometrical border lines between the grid rows and columns. Eventually this information is used to generate the missing bounding boxes. Additionally, the existence of unused grid squares indicates that the table rows have unequal number of columns and the overall structure is non-strict. The generation of missing bounding boxes for non-strict HTML tables is ambiguous and therefore quite challenging. Thus, we have decided to simply discard those tables. In case of PubTabNet we have computed missing bounding boxes for 48% of the simple and 69% of the complex tables. Regarding FinTabNet, 68% of the simple and 98% of the complex tables require the generation of bounding boxes.</paragraph>
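A hedged sketch of this neighbour-based completion follows; it is an illustration under simplifying assumptions (no spans, at least one known box per row and column), not the actual implementation:

```python
import statistics

# Estimate the grid border lines from the known boxes (median per row/column)
# and synthesize boxes for the missing grid squares. `known` maps a (row, col)
# grid position to an (x0, y0, x1, y1) box; table spans are ignored for brevity.
def complete_bboxes(known, n_rows, n_cols):
    def border(axis, index, coord):
        # axis 0 selects a grid row, axis 1 a grid column
        return statistics.median(
            box[coord] for pos, box in known.items() if pos[axis] == index
        )

    x0 = {c: border(1, c, 0) for c in range(n_cols)}  # left edge per column
    x1 = {c: border(1, c, 2) for c in range(n_cols)}  # right edge per column
    y0 = {r: border(0, r, 1) for r in range(n_rows)}  # top edge per row
    y1 = {r: border(0, r, 3) for r in range(n_rows)}  # bottom edge per row

    filled = dict(known)
    for r in range(n_rows):
        for c in range(n_cols):
            filled.setdefault((r, c), (x0[c], y0[r], x1[c], y1[r]))
    return filled
```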
<paragraph><location><page_11><loc_8><loc_18><loc_47><loc_21></location>Figure 7 illustrates the distribution of the tables across different dimensions per dataset.</paragraph>
<paragraph><location><page_11><loc_8><loc_18><loc_47><loc_20></location>Figure 7 illustrates the distribution of the tables across different dimensions per dataset.</paragraph>
<subtitle-level-1><location><page_11><loc_8><loc_15><loc_25><loc_16></location>1.2. Synthetic datasets</subtitle-level-1>
<paragraph><location><page_11><loc_8><loc_10><loc_47><loc_14></location>Aiming to train and evaluate our models in a broader spectrum of table data we have synthesized four types of datasets. Each one contains tables with different appear-</paragraph>
<paragraph><location><page_11><loc_50><loc_74><loc_89><loc_80></location>ances in regard to their size, structure, style and content. Every synthetic dataset contains 150k examples, summing up to 600k synthetic examples. All datasets are divided into Train, Test and Val splits (80%, 10%, 10%).</paragraph>
@ -273,7 +235,7 @@
<paragraph><location><page_11><loc_50><loc_10><loc_89><loc_17></location>Although TableFormer can predict the table structure and the bounding boxes for tables recognized inside PDF documents, this is not enough when a full reconstruction of the original table is required. This happens mainly due to the following reasons:</paragraph>
<caption><location><page_12><loc_8><loc_76><loc_89><loc_79></location>Figure 7: Distribution of the tables across different dimensions per dataset. Simple vs complex tables per dataset and split, strict vs non strict html structures per dataset and table complexity, missing bboxes per dataset and table complexity.</caption>
<figure>
<location><page_12><loc_9><loc_81><loc_89><loc_91></location>
<location><page_12><loc_8><loc_81><loc_89><loc_91></location>
<caption>Figure 7: Distribution of the tables across different dimensions per dataset. Simple vs complex tables per dataset and split, strict vs non strict html structures per dataset and table complexity, missing bboxes per dataset and table complexity.</caption>
</figure>
<paragraph><location><page_12><loc_10><loc_71><loc_47><loc_73></location>· TableFormer output does not include the table cell content.</paragraph>
@ -297,52 +259,46 @@
<paragraph><location><page_12><loc_50><loc_16><loc_89><loc_20></location>9c. Compute the left and right boundary of the vertical band for each grid column (min/max x coordinates per column).</paragraph>
<paragraph><location><page_12><loc_50><loc_13><loc_89><loc_16></location>9d. Intersect the orphan's bounding box with the column bands, and map the cell to the closest grid column.</paragraph>
<paragraph><location><page_12><loc_50><loc_10><loc_89><loc_13></location>9e. If the table cell under the identified row and column is not empty, extend its content with the content of the or-</paragraph>
<subtitle-level-1><location><page_13><loc_8><loc_89><loc_15><loc_91></location>phan cell.</subtitle-level-1>
<paragraph><location><page_13><loc_8><loc_89><loc_15><loc_91></location>phan cell.</paragraph>
<paragraph><location><page_13><loc_8><loc_86><loc_47><loc_89></location>9f. Otherwise create a new structural cell and match it with the orphan cell.</paragraph>
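A small Python sketch of steps 9c-9d (data shapes are assumptions for illustration, not the authors' code):

```python
# 9c: per grid column, the vertical band spans from the minimum x0 to the
# maximum x1 of the cells already assigned to that column.
# 9d: the orphan is mapped to the column whose band overlaps it the most.
def match_orphan_to_column(orphan_box, columns):
    """orphan_box: (x0, y0, x1, y1); columns: list of per-column cell boxes."""
    ox0, _, ox1, _ = orphan_box
    best_col, best_overlap = None, 0.0
    for col, cells in enumerate(columns):
        left = min(box[0] for box in cells)
        right = max(box[2] for box in cells)
        overlap = max(0.0, min(ox1, right) - max(ox0, left))
        if overlap > best_overlap:
            best_col, best_overlap = col, overlap
    return best_col  # None means no band overlaps; fall back to the nearest band
```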
<paragraph><location><page_13><loc_8><loc_83><loc_47><loc_86></location>Additional images with examples of TableFormer predictions and post-processing can be found below.</paragraph>
<paragraph><location><page_13><loc_10><loc_35><loc_45><loc_37></location>Figure 8: Example of a table with multi-line header.</paragraph>
<caption><location><page_13><loc_50><loc_59><loc_89><loc_61></location>Figure 9: Example of a table with big empty distance between cells.</caption>
<caption><location><page_13><loc_10><loc_35><loc_45><loc_37></location>Figure 8: Example of a table with multi-line header.</caption>
<figure>
<location><page_13><loc_51><loc_63><loc_91><loc_88></location>
<location><page_13><loc_14><loc_54><loc_39><loc_81></location>
<caption>Figure 8: Example of a table with multi-line header.</caption>
</figure>
<paragraph><location><page_13><loc_50><loc_59><loc_89><loc_61></location>Figure 9: Example of a table with big empty distance between cells.</paragraph>
<figure>
<location><page_13><loc_50><loc_63><loc_91><loc_88></location>
<caption>Figure 9: Example of a table with big empty distance between cells.</caption>
</figure>
<caption><location><page_13><loc_51><loc_13><loc_89><loc_14></location>Figure 10: Example of a complex table with empty cells.</caption>
<figure>
<location><page_13><loc_55><loc_16><loc_85><loc_25></location>
<location><page_13><loc_55><loc_16><loc_84><loc_52></location>
<caption>Figure 10: Example of a complex table with empty cells.</caption>
</figure>
<caption><location><page_14><loc_56><loc_13><loc_83><loc_14></location>Figure 14: Example with multi-line text.</caption>
<figure>
<location><page_14><loc_9><loc_68><loc_27><loc_73></location>
<location><page_14><loc_8><loc_68><loc_27><loc_73></location>
<caption>Figure 14: Example with multi-line text.</caption>
</figure>
<caption><location><page_14><loc_8><loc_52><loc_47><loc_55></location>Figure 11: Simple table with different style and empty cells.</caption>
<figure>
<location><page_14><loc_8><loc_57><loc_46><loc_66></location>
<caption>Figure 11: Simple table with different style and empty cells.</caption>
</figure>
<caption><location><page_14><loc_9><loc_14><loc_46><loc_15></location>Figure 12: Simple table predictions and post processing.</caption>
<figure>
<location><page_14><loc_8><loc_17><loc_29><loc_23></location>
<caption>Figure 12: Simple table predictions and post processing.</caption>
</figure>
<paragraph><location><page_14><loc_9><loc_14><loc_46><loc_15></location>Figure 12: Simple table predictions and post processing.</paragraph>
<caption><location><page_14><loc_52><loc_52><loc_88><loc_53></location>Figure 13: Table predictions example on colorful table.</caption>
<caption><location><page_14><loc_56><loc_13><loc_83><loc_14></location>Figure 14: Example with multi-line text.</caption>
<figure>
<location><page_14><loc_52><loc_25><loc_85><loc_31></location>
<caption>Figure 14: Example with multi-line text.</caption>
<location><page_14><loc_52><loc_55><loc_87><loc_89></location>
<caption>Figure 13: Table predictions example on colorful table.</caption>
</figure>
<caption><location><page_15><loc_50><loc_15><loc_89><loc_18></location>Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.</caption>
<caption><location><page_15><loc_14><loc_18><loc_41><loc_19></location>Figure 15: Example with triangular table.</caption>
<figure>
<location><page_15><loc_9><loc_69><loc_46><loc_83></location>
<caption>Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.</caption>
</figure>
<figure>
<location><page_15><loc_8><loc_20><loc_52><loc_36></location>
</figure>
<caption><location><page_15><loc_14><loc_17><loc_41><loc_19></location>Figure 15: Example with triangular table.</caption>
<figure>
<location><page_15><loc_58><loc_20><loc_81><loc_39></location>
<location><page_15><loc_8><loc_20><loc_86><loc_85></location>
<caption>Figure 15: Example with triangular table.</caption>
</figure>
<paragraph><location><page_15><loc_50><loc_15><loc_89><loc_18></location>Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.</paragraph>
<caption><location><page_16><loc_8><loc_33><loc_89><loc_36></location>Figure 17: Example of long table. End-to-end example from initial PDF cells to prediction of bounding boxes, post processing and prediction of structure.</caption>
<figure>
<location><page_16><loc_11><loc_37><loc_86><loc_68></location>
File diff suppressed because one or more lines are too long
View File
@ -1,6 +1,6 @@
## TableFormer: Table Structure Understanding with Transformers.
## Ahmed Nassar, Nikolaos Livathinos, Maksym Lysak, Peter Staar IBM Research
Ahmed Nassar, Nikolaos Livathinos, Maksym Lysak, Peter Staar IBM Research
{ ahn,nli,mly,taa } @zurich.ibm.com
@ -27,12 +27,12 @@ b. Red-annotation of bounding boxes, Blue-predictions by TableFormer
c. Structure predicted by TableFormer:
Figure 1: Picture of a table with subtle, complex features such as (1) multi-column headers, (2) cell with multi-row text and (3) cells with no content. Image from PubTabNet evaluation set, filename: 'PMC2944238 004 02'.
| 0 | 1 | 1 | 2 1 | 2 1 | |
|-----|-----|-----|-------|-------|----|
| 3 | 4 | 5 3 | 6 | 7 | |
| 8 | 9 | 10 | 11 | 12 | 2 |
| | 13 | 14 | 15 | 16 | 2 |
| | 17 | 18 | 19 | 20 | 2 |
| 0 | 1 | 1 | 2 1 | 2 1 | 2 1 |
|-----|-----|-----|-------|-------|-------|
| 3 | 4 | 5 3 | 6 | | 7 |
| 8 | 9 | 10 | 11 | 12 | 16 |
| 2 | 13 | 14 | 15 | | 16 |
| | 17 | 18 | 19 | 20 | 16 |
Recently, significant progress has been made with vision based approaches to extract tables in documents. For the sake of completeness, the issue of table extraction from documents is typically decomposed into two separate challenges, i.e. (1) finding the location of the table(s) on a document-page and (2) finding the structure of a given table in the document.
@ -179,7 +179,7 @@ where T$_{a}$ and T$_{b}$ represent tables in tree structure HTML format. EditDi
Structure. As shown in Tab. 2, TableFormer outperforms all SOTA methods across different datasets by a large margin for predicting the table structure from an image. Moreover, our model outperforms pre-trained methods. During the evaluation we do not apply any table filtering. We also provide our baseline results on the SynthTabNet dataset. It has been observed that large tables (e.g. tables that occupy half of the page or more) yield poor predictions. We attribute this issue to the image resizing during the preprocessing step, which produces downsampled images with indistinguishable features. This problem can be addressed by treating such big tables with a separate model which accepts a large input image size.
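For context, the TEDS metric used in Tab. 2 is, following the PubTabNet definition, one minus the normalized tree-edit distance between the two table trees:

$$ \mathrm{TEDS}(T_a, T_b) = 1 - \frac{\mathrm{EditDist}(T_a, T_b)}{\max(|T_a|, |T_b|)} $$

where $|T|$ denotes the number of nodes in tree $T$; scores closer to 1 indicate closer structural agreement.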
Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN).
Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) and SynthTabNet (STN). FT: Model was trained on PubTabNet then finetuned.
| Model | Dataset | Simple | TEDS Complex | All |
|-------------|-----------|----------|----------------|-------|
| EDD | PTN | 91.1 | 88.7 | 89.9 |
@ -193,8 +193,6 @@ Table 2: Structure results on PubTabNet (PTN), FinTabNet (FTN), TableBank (TB) a
| TableFormer | TB | 89.6 | - | 89.6 |
| TableFormer | STN | 96.9 | 95.7 | 96.7 |
FT: Model was trained on PubTabNet then finetuned.
Cell Detection. Like any object detector, our Cell BBox Detector provides bounding boxes that can be improved with post-processing during inference. We make use of the grid-like structure of tables to refine the predictions. A detailed explanation on the post-processing is available in the supplementary material. As shown in Tab. 3, we evaluate
our Cell BBox Decoder accuracy for cells with a class label of 'content' only using the PASCAL VOC mAP metric for pre-processing and post-processing. Note that we do not have post-processing results for SynthTabNet as images are only provided. To compare the performance of our proposed approach, we've integrated TableFormer's Cell BBox Decoder into EDD architecture. As mentioned previously, the Structure Decoder provides the Cell BBox Decoder with the features needed to predict the bounding box predictions. Therefore, the accuracy of the Structure Decoder directly influences the accuracy of the Cell BBox Decoder . If the Structure Decoder predicts an extra column, this will result in an extra column of predicted bounding boxes.
@ -220,46 +218,10 @@ Table 4: Results of structure with content retrieved using cell detection on Pub
a. Red - PDF cells, Green - predicted bounding boxes, Blue - post-processed predictions matched to PDF cells
Japanese language (previously unseen by TableFormer):
Example table from FinTabNet:
<!-- image -->
<!-- image -->
b. Structure predicted by TableFormer, with superimposed matched PDF cell text:
| | | 論文ファイル | 論文ファイル | 参考文献 | 参考文献 |
|----------------------------------------------------|-------------|----------------|----------------|------------|------------|
| 出典 | ファイル 数 | 英語 | 日本語 | 英語 | 日本語 |
| Association for Computational Linguistics(ACL2003) | 65 | 65 | 0 | 150 | 0 |
| Computational Linguistics(COLING2002) | 140 | 140 | 0 | 150 | 0 |
| 電気情報通信学会 2003 年総合大会 | 150 | 8 | 142 | 223 | 147 |
| 情報処理学会第 65 回全国大会 (2003) | 177 | 1 | 176 | 150 | 236 |
| 第 17 回人工知能学会全国大会 (2003) | 208 | 5 | 203 | 152 | 244 |
| 自然言語処理研究会第 146 〜 155 回 | 98 | 2 | 96 | 150 | 232 |
| WWW から収集した論文 | 107 | 73 | 34 | 147 | 96 |
| | 945 | 294 | 651 | 1122 | 955 |
Text is aligned to match original for ease of viewing
| | Shares (in millions) | Shares (in millions) | Weighted Average Grant Date Fair Value | Weighted Average Grant Date Fair Value |
|--------------------------|------------------------|------------------------|------------------------------------------|------------------------------------------|
| | RS U s | PSUs | RSUs | PSUs |
| Nonvested on Janua ry 1 | 1. 1 | 0.3 | 90.10 $ | $ 91.19 |
| Granted | 0. 5 | 0.1 | 117.44 | 122.41 |
| Vested | (0. 5 ) | (0.1) | 87.08 | 81.14 |
| Canceled or forfeited | (0. 1 ) | - | 102.01 | 92.18 |
| Nonvested on December 31 | 1.0 | 0.3 | 104.85 $ | $ 104.51 |
<!-- image -->
Figure 5: One of the benefits of TableFormer is that it is language agnostic, as an example, the left part of the illustration demonstrates TableFormer predictions on previously unseen language (Japanese). Additionally, we see that TableFormer is robust to variability in style and content, right side of the illustration shows the example of the TableFormer prediction from the FinTabNet dataset.
<!-- image -->
<!-- image -->
Figure 6: An example of TableFormer predictions (bounding boxes and structure) from generated SynthTabNet table.
<!-- image -->
@ -356,9 +318,7 @@ and evaluation. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael F
[38] Xu Zhong, Jianbin Tang, and Antonio Jimeno Yepes. Publaynet: Largest dataset ever for document layout analysis. In 2019 International Conference on Document Analysis and Recognition (ICDAR) , pages 1015-1022, 2019. 1
## TableFormer: Table Structure Understanding with Transformers
Supplementary Material
## TableFormer: Table Structure Understanding with Transformers Supplementary Material
## 1. Details on the datasets
@ -437,13 +397,16 @@ dian cell size for all table cells. The usage of median during the computations,
9e. If the table cell under the identified row and column is not empty, extend its content with the content of the or-
## phan cell.
phan cell.
9f. Otherwise create a new structural cell and match it with the orphan cell.
Additional images with examples of TableFormer predictions and post-processing can be found below.
Figure 8: Example of a table with multi-line header.
<!-- image -->
Figure 9: Example of a table with big empty distance between cells.
Figure 9: Example of a table with big empty distance between cells.
<!-- image -->
@ -451,28 +414,21 @@ Figure 9: Example of a table with big empty distance between cells.
Figure 10: Example of a complex table with empty cells.
<!-- image -->
Figure 14: Example with multi-line text.
<!-- image -->
Figure 11: Simple table with different style and empty cells.
<!-- image -->
Figure 12: Simple table predictions and post processing.
<!-- image -->
Figure 13: Table predictions example on colorful table.
Figure 14: Example with multi-line text.
<!-- image -->
Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.
<!-- image -->
<!-- image -->
Figure 15: Example with triangular table.
<!-- image -->
Figure 16: Example of how post-processing helps to restore mis-aligned bounding boxes prediction artifact.
Figure 17: Example of long table. End-to-end example from initial PDF cells to prediction of bounding boxes, post processing and prediction of structure.
<!-- image -->
File diff suppressed because one or more lines are too long
View File
@ -1,15 +1,14 @@
<document>
<subtitle-level-1><location><page_1><loc_18><loc_85><loc_83><loc_90></location>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</subtitle-level-1>
<subtitle-level-1><location><page_1><loc_17><loc_85><loc_83><loc_89></location>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</subtitle-level-1>
<paragraph><location><page_1><loc_15><loc_77><loc_32><loc_83></location>Birgit Pfitzmann IBM Research Rueschlikon, Switzerland bpf@zurich.ibm.com</paragraph>
<paragraph><location><page_1><loc_42><loc_77><loc_58><loc_83></location>Christoph Auer IBM Research Rueschlikon, Switzerland cau@zurich.ibm.com</paragraph>
<paragraph><location><page_1><loc_68><loc_77><loc_85><loc_83></location>Michele Dolfi IBM Research Rueschlikon, Switzerland dol@zurich.ibm.com</paragraph>
<paragraph><location><page_1><loc_28><loc_70><loc_45><loc_76></location>Ahmed S. Nassar IBM Research Rueschlikon, Switzerland ahn@zurich.ibm.com</paragraph>
<paragraph><location><page_1><loc_55><loc_70><loc_72><loc_76></location>Peter Staar IBM Research Rueschlikon, Switzerland taa@zurich.ibm.com</paragraph>
<subtitle-level-1><location><page_1><loc_9><loc_67><loc_18><loc_69></location>ABSTRACT</subtitle-level-1>
<paragraph><location><page_1><loc_9><loc_32><loc_48><loc_67></location>Accurate document layout analysis is a key requirement for high-quality PDF document conversion. With the recent availability of public, large ground-truth datasets such as PubLayNet and DocBank, deep-learning models have proven to be very effective at layout detection and segmentation. While these datasets are of adequate size to train such models, they severely lack in layout variability since they are sourced from scientific article repositories such as PubMed and arXiv only. Consequently, the accuracy of the layout segmentation drops significantly when these models are applied on more challenging and diverse layouts. In this paper, we present DocLayNet , a new, publicly available, document-layout annotation dataset in COCO format. It contains 80863 manually annotated pages from diverse data sources to represent a wide variability in layouts. For each PDF page, the layout annotations provide labelled bounding-boxes with a choice of 11 distinct classes. DocLayNet also provides a subset of double- and triple-annotated pages to determine the inter-annotator agreement. In multiple experiments, we provide baseline accuracy scores (in mAP) for a set of popular object detection models. We also demonstrate that these models fall approximately 10% behind the inter-annotator agreement. Furthermore, we provide evidence that DocLayNet is of sufficient size. Lastly, we compare models trained on PubLayNet, DocBank and DocLayNet, showing that layout predictions of the DocLayNet-trained models are more robust and thus the preferred choice for general-purpose document-layout analysis.</paragraph>
<paragraph><location><page_1><loc_9><loc_33><loc_48><loc_67></location>Accurate document layout analysis is a key requirement for high-quality PDF document conversion. With the recent availability of public, large ground-truth datasets such as PubLayNet and DocBank, deep-learning models have proven to be very effective at layout detection and segmentation. While these datasets are of adequate size to train such models, they severely lack in layout variability since they are sourced from scientific article repositories such as PubMed and arXiv only. Consequently, the accuracy of the layout segmentation drops significantly when these models are applied on more challenging and diverse layouts. In this paper, we present DocLayNet , a new, publicly available, document-layout annotation dataset in COCO format. It contains 80863 manually annotated pages from diverse data sources to represent a wide variability in layouts. For each PDF page, the layout annotations provide labelled bounding-boxes with a choice of 11 distinct classes. DocLayNet also provides a subset of double- and triple-annotated pages to determine the inter-annotator agreement. In multiple experiments, we provide baseline accuracy scores (in mAP) for a set of popular object detection models. We also demonstrate that these models fall approximately 10% behind the inter-annotator agreement. Furthermore, we provide evidence that DocLayNet is of sufficient size. Lastly, we compare models trained on PubLayNet, DocBank and DocLayNet, showing that layout predictions of the DocLayNet-trained models are more robust and thus the preferred choice for general-purpose document-layout analysis.</paragraph>
<subtitle-level-1><location><page_1><loc_9><loc_29><loc_22><loc_30></location>CCS CONCEPTS</subtitle-level-1>
<paragraph><location><page_1><loc_9><loc_25><loc_49><loc_29></location>· Information systems → Document structure ; · Applied computing → Document analysis ; · Computing methodologies → Machine learning ; Computer vision ; Object detection ;</paragraph>
<paragraph><location><page_1><loc_9><loc_15><loc_48><loc_20></location>Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).</paragraph>
<paragraph><location><page_1><loc_9><loc_11><loc_32><loc_15></location>KDD '22, August 14-18, 2022, Washington, DC, USA © 2022 Copyright held by the owner/author(s). ACM ISBN 978-1-4503-9385-0/22/08. https://doi.org/10.1145/3534678.3539043</paragraph>
<caption><location><page_1><loc_52><loc_29><loc_91><loc_32></location>Figure 1: Four examples of complex page layouts across different document categories</caption>
<figure>
@ -19,12 +18,11 @@
<subtitle-level-1><location><page_1><loc_52><loc_24><loc_62><loc_25></location>KEYWORDS</subtitle-level-1>
<paragraph><location><page_1><loc_52><loc_21><loc_91><loc_23></location>PDF document conversion, layout segmentation, object-detection, data set, Machine Learning</paragraph>
<subtitle-level-1><location><page_1><loc_52><loc_18><loc_66><loc_19></location>ACM Reference Format:</subtitle-level-1>
<paragraph><location><page_1><loc_52><loc_11><loc_91><loc_18></location>Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar. 2022. DocLayNet: A Large Human-Annotated Dataset for DocumentLayout Analysis. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22), August 14-18, 2022, Washington, DC, USA. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/ 3534678.3539043</paragraph>
<subtitle-level-1><location><page_2><loc_9><loc_88><loc_26><loc_89></location>1 INTRODUCTION</subtitle-level-1>
<paragraph><location><page_2><loc_9><loc_71><loc_50><loc_86></location>Despite the substantial improvements achieved with machine-learning (ML) approaches and deep neural networks in recent years, document conversion remains a challenging problem, as demonstrated by the numerous public competitions held on this topic [1-4]. The challenge originates from the huge variability in PDF documents regarding layout, language and formats (scanned, programmatic or a combination of both). Engineering a single ML model that can be applied on all types of documents and provides high-quality layout segmentation remains to this day extremely challenging [5]. To highlight the variability in document layouts, we show a few example documents from the DocLayNet dataset in Figure 1.</paragraph>
<paragraph><location><page_2><loc_9><loc_37><loc_48><loc_71></location>A key problem in the process of document conversion is to understand the structure of a single document page, i.e. which segments of text should be grouped together in a unit. To train models for this task, there are currently two large datasets available to the community, PubLayNet [6] and DocBank [7]. They were introduced in 2019 and 2020 respectively and significantly accelerated the implementation of layout detection and segmentation models due to their sizes of 300K and 500K ground-truth pages. These sizes were achieved by leveraging an automation approach. The benefit of automated ground-truth generation is obvious: one can generate large ground-truth datasets at virtually no cost. However, the automation introduces a constraint on the variability in the dataset, because corresponding structured source data must be available. PubLayNet and DocBank were both generated from scientific document repositories (PubMed and arXiv), which provide XML or LaTeX sources. Those scientific documents present a limited variability in their layouts, because they are typeset in uniform templates provided by the publishers. Obviously, documents such as technical manuals, annual company reports, legal text, government tenders, etc. have very different and partially unique layouts. As a consequence, the layout predictions obtained from models trained on PubLayNet or DocBank are very reasonable when applied on scientific documents. However, for more artistic or free-style layouts, we see sub-par prediction quality from these models, which we demonstrate in Section 5.</paragraph>
<paragraph><location><page_2><loc_9><loc_36><loc_48><loc_71></location>A key problem in the process of document conversion is to understand the structure of a single document page, i.e. which segments of text should be grouped together in a unit. To train models for this task, there are currently two large datasets available to the community, PubLayNet [6] and DocBank [7]. They were introduced in 2019 and 2020 respectively and significantly accelerated the implementation of layout detection and segmentation models due to their sizes of 300K and 500K ground-truth pages. These sizes were achieved by leveraging an automation approach. The benefit of automated ground-truth generation is obvious: one can generate large ground-truth datasets at virtually no cost. However, the automation introduces a constraint on the variability in the dataset, because corresponding structured source data must be available. PubLayNet and DocBank were both generated from scientific document repositories (PubMed and arXiv), which provide XML or LaTeX sources. Those scientific documents present a limited variability in their layouts, because they are typeset in uniform templates provided by the publishers. Obviously, documents such as technical manuals, annual company reports, legal text, government tenders, etc. have very different and partially unique layouts. As a consequence, the layout predictions obtained from models trained on PubLayNet or DocBank are very reasonable when applied on scientific documents. However, for more artistic or free-style layouts, we see sub-par prediction quality from these models, which we demonstrate in Section 5.</paragraph>
<paragraph><location><page_2><loc_9><loc_27><loc_48><loc_36></location>In this paper, we present the DocLayNet dataset. It provides pageby-page layout annotation ground-truth using bounding-boxes for 11 distinct class labels on 80863 unique document pages, of which a fraction carry double- or triple-annotations. DocLayNet is similar in spirit to PubLayNet and DocBank and will likewise be made available to the public 1 in order to stimulate the document-layout analysis community. It distinguishes itself in the following aspects:</paragraph>
<paragraph><location><page_2><loc_11><loc_22><loc_48><loc_26></location>(1) Human Annotation : In contrast to PubLayNet and DocBank, we relied on human annotation instead of automation approaches to generate the data set.</paragraph>
<paragraph><location><page_2><loc_10><loc_22><loc_48><loc_26></location>(1) Human Annotation : In contrast to PubLayNet and DocBank, we relied on human annotation instead of automation approaches to generate the data set.</paragraph>
<paragraph><location><page_2><loc_11><loc_20><loc_48><loc_22></location>(2) Large Layout Variability : We include diverse and complex layouts from a large variety of public sources.</paragraph>
<paragraph><location><page_2><loc_10><loc_15><loc_48><loc_19></location>(3) Detailed Label Set : We define 11 class labels to distinguish layout features in high detail. PubLayNet provides 5 labels; DocBank provides 13, although not a superset of ours.</paragraph>
<paragraph><location><page_2><loc_11><loc_13><loc_48><loc_15></location>(4) Redundant Annotations : A fraction of the pages in the DocLayNet data set carry more than one human annotation.</paragraph>
@ -32,11 +30,11 @@
<paragraph><location><page_2><loc_54><loc_80><loc_91><loc_86></location>(5) Pre-defined Train-, Test- & Validation-set : Like DocBank, we provide fixed train-, test- & validation-sets to ensure proportional representation of the class-labels. Further, we prevent leakage of unique layouts across sets, which has a large effect on model accuracy scores.</paragraph>
<paragraph><location><page_2><loc_52><loc_72><loc_91><loc_79></location>All aspects outlined above are detailed in Section 3. In Section 4, we will elaborate on how we designed and executed this large-scale human annotation campaign. We will also share key insights and lessons learned that might prove helpful for other parties planning to set up annotation campaigns.</paragraph>
<paragraph><location><page_2><loc_52><loc_61><loc_91><loc_72></location>In Section 5, we will present baseline accuracy numbers for a variety of object detection methods (Faster R-CNN, Mask R-CNN and YOLOv5) trained on DocLayNet. We further show how the model performance is impacted by varying the DocLayNet dataset size, reducing the label set and modifying the train/test-split. Last but not least, we compare the performance of models trained on PubLayNet, DocBank and DocLayNet and demonstrate that a model trained on DocLayNet provides overall more robust layout recovery.</paragraph>
<subtitle-level-1><location><page_2><loc_52><loc_58><loc_69><loc_60></location>2 RELATED WORK</subtitle-level-1>
<subtitle-level-1><location><page_2><loc_52><loc_58><loc_69><loc_59></location>2 RELATED WORK</subtitle-level-1>
<paragraph><location><page_2><loc_52><loc_41><loc_91><loc_56></location>While early approaches in document-layout analysis used rulebased algorithms and heuristics [8], the problem is lately addressed with deep learning methods. The most common approach is to leverage object detection models [9-15]. In the last decade, the accuracy and speed of these models has increased dramatically. Furthermore, most state-of-the-art object detection methods can be trained and applied with very little work, thanks to a standardisation effort of the ground-truth data format [16] and common deep-learning frameworks [17]. Reference data sets such as PubLayNet [6] and DocBank provide their data in the commonly accepted COCO format [16].</paragraph>
<paragraph><location><page_2><loc_52><loc_30><loc_91><loc_41></location>Lately, new types of ML models for document-layout analysis have emerged in the community [18-21]. These models do not approach the problem of layout analysis purely based on an image representation of the page, as computer vision methods do. Instead, they combine the text tokens and image representation of a page in order to obtain a segmentation. While the reported accuracies appear to be promising, a broadly accepted data format which links geometric and textual features has yet to be established.</paragraph>
<subtitle-level-1><location><page_2><loc_52><loc_27><loc_78><loc_29></location>3 THE DOCLAYNET DATASET</subtitle-level-1>
<paragraph><location><page_2><loc_52><loc_15><loc_91><loc_26></location>DocLayNet contains 80863 PDF pages. Among these, 7059 carry two instances of human annotations, and 1591 carry three. This amounts to 91104 total annotation instances. The annotations provide layout information in the shape of labeled, rectangular boundingboxes. We define 11 distinct labels for layout features, namely Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Our reasoning for picking this particular label set is detailed in Section 4.</paragraph>
<paragraph><location><page_2><loc_52><loc_15><loc_91><loc_25></location>DocLayNet contains 80863 PDF pages. Among these, 7059 carry two instances of human annotations, and 1591 carry three. This amounts to 91104 total annotation instances. The annotations provide layout information in the shape of labeled, rectangular boundingboxes. We define 11 distinct labels for layout features, namely Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Our reasoning for picking this particular label set is detailed in Section 4.</paragraph>
<paragraph><location><page_2><loc_52><loc_11><loc_91><loc_14></location>In addition to open intellectual property constraints for the source documents, we required that the documents in DocLayNet adhere to a few conditions. Firstly, we kept scanned documents</paragraph>
<caption><location><page_3><loc_9><loc_68><loc_48><loc_70></location>Figure 2: Distribution of DocLayNet pages across document categories.</caption>
<figure>
@ -45,18 +43,19 @@
</figure>
<paragraph><location><page_3><loc_9><loc_54><loc_48><loc_64></location>to a minimum, since they introduce difficulties in annotation (see Section 4). As a second condition, we focussed on medium to large documents ( > 10 pages) with technical content, dense in complex tables, figures, plots and captions. Such documents carry a lot of information value, but are often hard to analyse with high accuracy due to their challenging layouts. Counterexamples of documents not included in the dataset are receipts, invoices, hand-written documents or photographs showing "text in the wild".</paragraph>
<paragraph><location><page_3><loc_9><loc_36><loc_48><loc_53></location>The pages in DocLayNet can be grouped into six distinct categories, namely Financial Reports , Manuals , Scientific Articles , Laws & Regulations , Patents and Government Tenders . Each document category was sourced from various repositories. For example, Financial Reports contain both free-style format annual reports 2 which expose company-specific, artistic layouts as well as the more formal SEC filings. The two largest categories ( Financial Reports and Manuals ) contain a large amount of free-style layouts in order to obtain maximum variability. In the other four categories, we boosted the variability by mixing documents from independent providers, such as different government websites or publishers. In Figure 2, we show the document categories contained in DocLayNet with their respective sizes.</paragraph>
<paragraph><location><page_3><loc_9><loc_23><loc_48><loc_36></location>We did not control the document selection with regard to language. The vast majority of documents contained in DocLayNet (close to 95%) are published in English language. However, DocLayNet also contains a number of documents in other languages such as German (2.5%), French (1.0%) and Japanese (1.0%). While the document language has negligible impact on the performance of computer vision methods such as object detection and segmentation models, it might prove challenging for layout analysis methods which exploit textual features.</paragraph>
<paragraph><location><page_3><loc_9><loc_13><loc_48><loc_23></location>To ensure that future benchmarks in the document-layout analysis community can be easily compared, we have split up DocLayNet into pre-defined train-, test- and validation-sets. In this way, we can avoid spurious variations in the evaluation scores due to random splitting in train-, test- and validation-sets. We also ensured that less frequent labels are represented in train and test sets in equal proportions.</paragraph>
<paragraph><location><page_3><loc_9><loc_23><loc_48><loc_35></location>We did not control the document selection with regard to language. The vast majority of documents contained in DocLayNet (close to 95%) are published in English language. However, DocLayNet also contains a number of documents in other languages such as German (2.5%), French (1.0%) and Japanese (1.0%). While the document language has negligible impact on the performance of computer vision methods such as object detection and segmentation models, it might prove challenging for layout analysis methods which exploit textual features.</paragraph>
<paragraph><location><page_3><loc_9><loc_14><loc_48><loc_23></location>To ensure that future benchmarks in the document-layout analysis community can be easily compared, we have split up DocLayNet into pre-defined train-, test- and validation-sets. In this way, we can avoid spurious variations in the evaluation scores due to random splitting in train-, test- and validation-sets. We also ensured that less frequent labels are represented in train and test sets in equal proportions.</paragraph>
<paragraph><location><page_3><loc_52><loc_80><loc_91><loc_89></location>Table 1 shows the overall frequency and distribution of the labels among the different sets. Importantly, we ensure that subsets are only split on full-document boundaries. This avoids that pages of the same document are spread over train, test and validation set, which can give an undesired evaluation advantage to models and lead to overestimation of their prediction accuracy. We will show the impact of this decision in Section 5.</paragraph>
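A minimal sketch of such a document-boundary split (the field name and ratios are illustrative assumptions; the label-frequency stratification the paper also performs is omitted for brevity):

```python
import random
from collections import defaultdict

# Group pages by source document first, then assign whole documents to
# train/test/val so that no document's pages leak across the sets.
def split_by_document(pages, ratios=(0.8, 0.1, 0.1), seed=0):
    by_doc = defaultdict(list)
    for page in pages:
        by_doc[page["doc_name"]].append(page)   # "doc_name" is an assumed field
    docs = sorted(by_doc)
    random.Random(seed).shuffle(docs)
    cut1 = int(ratios[0] * len(docs))
    cut2 = int((ratios[0] + ratios[1]) * len(docs))
    pick = lambda names: [p for d in names for p in by_doc[d]]
    return pick(docs[:cut1]), pick(docs[cut1:cut2]), pick(docs[cut2:])
```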
<paragraph><location><page_3><loc_52><loc_66><loc_91><loc_79></location>In order to accommodate the different types of models currently in use by the community, we provide DocLayNet in an augmented COCO format [16]. This entails the standard COCO ground-truth file (in JSON format) with the associated page images (in PNG format, 1025 × 1025 pixels). Furthermore, custom fields have been added to each COCO record to specify document category, original document filename and page number. In addition, we also provide the original PDF pages, as well as sidecar files containing parsed PDF text and text-cell coordinates (in JSON). All additional files are linked to the primary page images by their matching filenames.</paragraph>
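To make the file layout concrete, a hedged sketch of reading one page together with its sidecar file (field and path names are assumptions for illustration, not taken from the dataset spec):

```python
import json
from pathlib import Path

# Load one page image record from the COCO ground-truth file, its annotations,
# and the sidecar JSON with parsed PDF text cells; all artifacts share a stem.
def load_page(coco_json: Path, sidecar_dir: Path, index: int):
    coco = json.loads(coco_json.read_text())
    img = coco["images"][index]                      # includes the custom fields
    stem = Path(img["file_name"]).stem               # links PNG, PDF and JSON
    cells = json.loads((sidecar_dir / f"{stem}.json").read_text())
    anns = [a for a in coco["annotations"] if a["image_id"] == img["id"]]
    return img, anns, cells
```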
<paragraph><location><page_3><loc_52><loc_26><loc_91><loc_65></location>Despite being cost-intense and far less scalable than automation, human annotation has several benefits over automated groundtruth generation. The first and most obvious reason to leverage human annotations is the freedom to annotate any type of document without requiring a programmatic source. For most PDF documents, the original source document is not available. The latter is not a hard constraint with human annotation, but it is for automated methods. A second reason to use human annotations is that the latter usually provide a more natural interpretation of the page layout. The human-interpreted layout can significantly deviate from the programmatic layout used in typesetting. For example, "invisible" tables might be used solely for aligning text paragraphs on columns. Such typesetting tricks might be interpreted by automated methods incorrectly as an actual table, while the human annotation will interpret it correctly as Text or other styles. The same applies to multi-line text elements, when authors decided to space them as "invisible" list elements without bullet symbols. A third reason to gather ground-truth through human annotation is to estimate a "natural" upper bound on the segmentation accuracy. As we will show in Section 4, certain documents featuring complex layouts can have different but equally acceptable layout interpretations. This natural upper bound for segmentation accuracy can be found by annotating the same pages multiple times by different people and evaluating the inter-annotator agreement. Such a baseline consistency evaluation is very useful to define expectations for a good target accuracy in trained deep neural network models and avoid overfitting (see Table 1). On the flip side, achieving high annotation consistency proved to be a key challenge in human annotation, as we outline in Section 4.</paragraph>
<subtitle-level-1><location><page_3><loc_52><loc_22><loc_77><loc_23></location>4 ANNOTATION CAMPAIGN</subtitle-level-1>
<paragraph><location><page_3><loc_52><loc_11><loc_91><loc_20></location>The annotation campaign was carried out in four phases. In phase one, we identified and prepared the data sources for annotation. In phase two, we determined the class labels and how annotations should be done on the documents in order to obtain maximum consistency. The latter was guided by a detailed requirement analysis and exhaustive experiments. In phase three, we trained the annotation staff and performed exams for quality assurance. In phase four,</paragraph>
<paragraph><location><page_4><loc_9><loc_91><loc_91><loc_92></location>KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar</paragraph>
<caption><location><page_4><loc_9><loc_85><loc_91><loc_89></location>Table 1: DocLayNet dataset overview. Along with the frequency of each class label, we present the relative occurrence (as % of row "Total") in the train, test and validation sets. The inter-annotator agreement is computed as the mAP@0.5-0.95 metric between pairwise annotations from the triple-annotated pages, from which we obtain accuracy ranges.</caption>
<table>
<location><page_4><loc_16><loc_63><loc_84><loc_83></location>
<caption>Table 1: DocLayNet dataset overview. Along with the frequency of each class label, we present the relative occurrence (as % of row "Total") in the train, test and validation sets. The inter-annotator agreement is computed as the mAP@0.5-0.95 metric between pairwise annotations from the triple-annotated pages, from which we obtain accuracy ranges.</caption>
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>% of Total</col_2><col_3><col_header>% of Total</col_3><col_4><col_header>% of Total</col_4><col_5><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_5><col_6><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_6><col_7><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_7><col_8><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_8><col_9><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_9><col_10><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_10><col_11><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_11></row_0>
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>% of Total</col_2><col_3><col_header>% of Total</col_3><col_4><col_header>% of Total</col_4><col_5><col_header>% of Total</col_5><col_6><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_6><col_7><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_7><col_8><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_8><col_9><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_9><col_10><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_10><col_11><col_header>triple inter-annotator mAP @ 0.5-0.95 (%)</col_11></row_0>
<row_1><col_0><col_header>class label</col_0><col_1><col_header>Count</col_1><col_2><col_header>Train</col_2><col_3><col_header>Test</col_3><col_4><col_header>Val</col_4><col_5><col_header>All</col_5><col_6><col_header>Fin</col_6><col_7><col_header>Man</col_7><col_8><col_header>Sci</col_8><col_9><col_header>Law</col_9><col_10><col_header>Pat</col_10><col_11><col_header>Ten</col_11></row_1>
<row_2><col_0><row_header>Caption</col_0><col_1><body>22524</col_1><col_2><body>2.04</col_2><col_3><body>1.77</col_3><col_4><body>2.32</col_4><col_5><body>84-89</col_5><col_6><body>40-61</col_6><col_7><body>86-92</col_7><col_8><body>94-99</col_8><col_9><body>95-99</col_9><col_10><body>69-78</col_10><col_11><body>n/a</col_11></row_2>
<row_3><col_0><row_header>Footnote</col_0><col_1><body>6318</col_1><col_2><body>0.60</col_2><col_3><body>0.31</col_3><col_4><body>0.58</col_4><col_5><body>83-91</col_5><col_6><body>n/a</col_6><col_7><body>100</col_7><col_8><body>62-88</col_8><col_9><body>85-94</col_9><col_10><body>n/a</col_10><col_11><body>82-97</col_11></row_3>
@ -71,7 +70,7 @@
<row_12><col_0><row_header>Title</col_0><col_1><body>5071</col_1><col_2><body>0.47</col_2><col_3><body>0.30</col_3><col_4><body>0.50</col_4><col_5><body>60-72</col_5><col_6><body>24-63</col_6><col_7><body>50-63</col_7><col_8><body>94-100</col_8><col_9><body>82-96</col_9><col_10><body>68-79</col_10><col_11><body>24-56</col_11></row_12>
<row_13><col_0><row_header>Total</col_0><col_1><body>1107470</col_1><col_2><body>941123</col_2><col_3><body>99816</col_3><col_4><body>66531</col_4><col_5><body>82-83</col_5><col_6><body>71-74</col_6><col_7><body>79-81</col_7><col_8><body>89-94</col_8><col_9><body>86-91</col_9><col_10><body>71-76</col_10><col_11><body>68-85</col_11></row_13>
</table>
<caption><location><page_4><loc_9><loc_23><loc_48><loc_30></location>Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.</caption>
<paragraph><location><page_4><loc_9><loc_23><loc_48><loc_30></location>Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.</paragraph>
<figure>
<location><page_4><loc_9><loc_32><loc_48><loc_61></location>
<caption>Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.</caption>
@ -81,20 +80,21 @@
<paragraph><location><page_4><loc_52><loc_53><loc_91><loc_61></location>include publication repositories such as arXiv$^{3}$, government offices, company websites as well as data directory services for financial reports and patents. Scanned documents were excluded wherever possible because they can be rotated or skewed. This would not allow us to perform annotation with rectangular bounding-boxes and therefore complicate the annotation process.</paragraph>
<paragraph><location><page_4><loc_52><loc_36><loc_91><loc_52></location>Preparation work included uploading and parsing the sourced PDF documents in the Corpus Conversion Service (CCS) [22], a cloud-native platform which provides a visual annotation interface and allows for dataset inspection and analysis. The annotation interface of CCS is shown in Figure 3. The desired balance of pages between the different document categories was achieved by selective subsampling of pages with certain desired properties. For example, we made sure to include the title page of each document and bias the remaining page selection to those with figures or tables. The latter was achieved by leveraging pre-trained object detection models from PubLayNet, which helped us estimate how many figures and tables a given page contains.</paragraph>
<paragraph><location><page_4><loc_52><loc_12><loc_91><loc_36></location>Phase 2: Label selection and guideline. We reviewed the collected documents and identified the most common structural features they exhibit. This was achieved by identifying recurrent layout elements, which led us to the definition of 11 distinct class labels. These 11 class labels are Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Critical factors that were considered for the choice of these class labels were (1) the overall occurrence of the label, (2) the specificity of the label, (3) recognisability on a single page (i.e. no need for context from previous or next page) and (4) overall coverage of the page. Specificity ensures that the choice of label is not ambiguous, while coverage ensures that all meaningful items on a page can be annotated. We refrained from class labels that are very specific to a document category, such as Abstract in the Scientific Articles category. We also avoided class labels that are tightly linked to the semantics of the text. Labels such as Author and Affiliation , as seen in DocBank, are often only distinguishable by discriminating on</paragraph>
<paragraph><location><page_5><loc_9><loc_86><loc_48><loc_89></location>the textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category.</paragraph>
<paragraph><location><page_5><loc_9><loc_91><loc_57><loc_92></location>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</paragraph>
<paragraph><location><page_5><loc_9><loc_87><loc_48><loc_89></location>the textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category.</paragraph>
<paragraph><location><page_5><loc_9><loc_69><loc_48><loc_86></location>At first sight, the task of visual document-layout interpretation appears intuitive enough to obtain plausible annotations in most cases. However, during early trial-runs in the core team, we observed many cases in which annotators use different annotation styles, especially for documents with challenging layouts. For example, if a figure is presented with subfigures, one annotator might draw a single figure bounding-box, while another might annotate each subfigure separately. The same applies for lists, where one might annotate all list items in one block or each list item separately. In essence, we observed that challenging layouts would be annotated in different but plausible ways. To illustrate this, we show in Figure 4 multiple examples of plausible but inconsistent annotations on the same pages.</paragraph>
<paragraph><location><page_5><loc_9><loc_57><loc_48><loc_68></location>Obviously, this inconsistency in annotations is not desirable for datasets which are intended to be used for model training. To minimise these inconsistencies, we created a detailed annotation guideline. While perfect consistency across 40 annotation staff members is clearly not possible to achieve, we saw a huge improvement in annotation consistency after the introduction of our annotation guideline. A few selected, non-trivial highlights of the guideline are:</paragraph>
<paragraph><location><page_5><loc_11><loc_51><loc_48><loc_56></location>(1) Every list-item is an individual object instance with class label List-item . This definition is different from PubLayNet and DocBank, where all list-items are grouped together into one List object.</paragraph>
<paragraph><location><page_5><loc_11><loc_45><loc_48><loc_51></location>(2) A List-item is a paragraph with hanging indentation. Singleline elements can qualify as List-item if the neighbour elements expose hanging indentation. Bullet or enumeration symbols are not a requirement.</paragraph>
<paragraph><location><page_5><loc_11><loc_42><loc_48><loc_45></location>(3) For every Caption , there must be exactly one corresponding Picture or Table .</paragraph>
<paragraph><location><page_5><loc_11><loc_40><loc_48><loc_42></location>(4) Connected sub-pictures are grouped together in one Picture object.</paragraph>
<paragraph><location><page_5><loc_10><loc_51><loc_48><loc_56></location>(1) Every list-item is an individual object instance with class label List-item . This definition is different from PubLayNet and DocBank, where all list-items are grouped together into one List object.</paragraph>
<paragraph><location><page_5><loc_10><loc_45><loc_48><loc_50></location>(2) A List-item is a paragraph with hanging indentation. Singleline elements can qualify as List-item if the neighbour elements expose hanging indentation. Bullet or enumeration symbols are not a requirement.</paragraph>
<paragraph><location><page_5><loc_10><loc_42><loc_48><loc_45></location>(3) For every Caption , there must be exactly one corresponding Picture or Table .</paragraph>
<paragraph><location><page_5><loc_10><loc_40><loc_48><loc_42></location>(4) Connected sub-pictures are grouped together in one Picture object.</paragraph>
<paragraph><location><page_5><loc_10><loc_38><loc_43><loc_39></location>(5) Formula numbers are included in a Formula object.</paragraph>
<paragraph><location><page_5><loc_10><loc_34><loc_48><loc_38></location>(6) Emphasised text (e.g. in italic or bold) at the beginning of a paragraph is not considered a Section-header , unless it appears exclusively on its own line.</paragraph>
<paragraph><location><page_5><loc_9><loc_27><loc_48><loc_33></location>The complete annotation guideline is over 100 pages long and a detailed description is obviously out of scope for this paper. Nevertheless, it will be made publicly available alongside with DocLayNet for future reference.</paragraph>
<paragraph><location><page_5><loc_9><loc_11><loc_48><loc_27></location>Phase 3: Training. After a first trial with a small group of people, we realised that providing the annotation guideline and a set of random practice pages did not yield the desired quality level for layout annotation. Therefore we prepared a subset of pages with two different complexity levels, each with a practice and an exam part. 974 pages were reference-annotated by one proficient core team member. Annotation staff were then given the task to annotate the same subsets (blinded from the reference). By comparing the annotations of each staff member with the reference annotations, we could quantify how closely their annotations matched the reference. Only after passing two exam levels with high annotation quality, staff were admitted into the production phase. Practice iterations</paragraph>
<caption><location><page_5><loc_52><loc_36><loc_91><loc_40></location>Figure 4: Examples of plausible annotation alternatives for the same page. Criteria in our annotation guideline can resolve cases A to C, while the case D remains ambiguous.</caption>
<figure>
<location><page_5><loc_52><loc_42><loc_91><loc_89></location>
<location><page_5><loc_51><loc_42><loc_91><loc_89></location>
<caption>Figure 4: Examples of plausible annotation alternatives for the same page. Criteria in our annotation guideline can resolve cases A to C, while the case D remains ambiguous.</caption>
</figure>
<paragraph><location><page_5><loc_52><loc_31><loc_91><loc_34></location>were carried out over a timeframe of 12 weeks, after which 8 of the 40 initially allocated annotators did not pass the bar.</paragraph>
@@ -123,17 +123,18 @@
<paragraph><location><page_6><loc_9><loc_10><loc_48><loc_23></location>The primary goal of DocLayNet is to obtain high-quality ML models capable of accurate document-layout analysis on a wide variety of challenging layouts. As discussed in Section 2, object detection models are currently the easiest to use, due to the standardisation of ground-truth data in COCO format [16] and the availability of general frameworks such as detectron2 [17]. Furthermore, baseline numbers in PubLayNet and DocBank were obtained using standard object detection models such as Mask R-CNN and Faster R-CNN. As such, we will relate to these object detection methods in this</paragraph>
<caption><location><page_6><loc_52><loc_57><loc_91><loc_65></location>Figure 5: Prediction performance (mAP@0.5-0.95) of a Mask R-CNN network with ResNet50 backbone trained on increasing fractions of the DocLayNet dataset. The learning curve flattens around the 80% mark, indicating that increasing the size of the DocLayNet dataset with similar data will not yield significantly better predictions.</caption>
<figure>
<location><page_6><loc_53><loc_67><loc_90><loc_89></location>
<location><page_6><loc_53><loc_67><loc_91><loc_89></location>
<caption>Figure 5: Prediction performance (mAP@0.5-0.95) of a Mask R-CNN network with ResNet50 backbone trained on increasing fractions of the DocLayNet dataset. The learning curve flattens around the 80% mark, indicating that increasing the size of the DocLayNet dataset with similar data will not yield significantly better predictions.</caption>
</figure>
<paragraph><location><page_6><loc_52><loc_49><loc_91><loc_52></location>paper and leave the detailed evaluation of more recent methods mentioned in Section 2 for future work.</paragraph>
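For context, a minimal sketch of how such a Mask R-CNN baseline could be set up with detectron2 on a COCO-format dataset follows; the dataset name, file paths, and solver settings are illustrative assumptions, not the authors' actual training configuration.

```python
# Hedged sketch: minimal detectron2 setup for a Mask R-CNN R50 baseline on a
# COCO-format layout dataset. Dataset name and file paths are hypothetical.
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultTrainer

register_coco_instances("doclaynet_train", {}, "train_coco.json", "images/")

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.DATASETS.TRAIN = ("doclaynet_train",)
cfg.DATASETS.TEST = ()
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 11  # the 11 DocLayNet class labels
cfg.SOLVER.IMS_PER_BATCH = 2

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
# trainer.train()  # launches training once the dataset files exist
```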
<paragraph><location><page_6><loc_52><loc_39><loc_91><loc_49></location>In this section, we will present several aspects related to the performance of object detection models on DocLayNet. Similarly as in PubLayNet, we will evaluate the quality of their predictions using mean average precision (mAP) with 10 overlaps that range from 0.5 to 0.95 in steps of 0.05 (mAP@0.5-0.95). These scores are computed by leveraging the evaluation code provided by the COCO API [16].</paragraph>
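The mAP@0.5-0.95 metric referenced here is the standard COCO box metric; a minimal evaluation sketch with pycocotools (file names are placeholders) could look like this:

```python
# Hedged sketch: compute COCO-style mAP@0.5-0.95 for bounding-box predictions.
# Annotation and result file names are placeholders.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO("doclaynet_test_coco.json")     # ground-truth annotations
coco_dt = coco_gt.loadRes("predictions.json")  # detections in COCO results format

ev = COCOeval(coco_gt, coco_dt, iouType="bbox")
ev.evaluate()
ev.accumulate()
ev.summarize()  # first reported value is AP averaged over IoU 0.50:0.95
```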
<subtitle-level-1><location><page_6><loc_52><loc_36><loc_76><loc_37></location>Baselines for Object Detection</subtitle-level-1>
<paragraph><location><page_6><loc_52><loc_11><loc_91><loc_35></location>In Table 2, we present baseline experiments (given in mAP) on Mask R-CNN [12], Faster R-CNN [11], and YOLOv5 [13]. Both training and evaluation were performed on RGB images with dimensions of 1025 × 1025 pixels. For training, we only used one annotation in case of redundantly annotated pages. As one can observe, the variation in mAP between the models is rather low, but overall between 6 and 10% lower than the mAP computed from the pairwise human annotations on triple-annotated pages. This gives a good indication that the DocLayNet dataset poses a worthwhile challenge for the research community to close the gap between human recognition and ML approaches. It is interesting to see that Mask R-CNN and Faster R-CNN produce very comparable mAP scores, indicating that pixel-based image segmentation derived from bounding-boxes does not help to obtain better predictions. On the other hand, the more recent Yolov5x model does very well and even out-performs humans on selected labels such as Text , Table and Picture . This is not entirely surprising, as Text , Table and Picture are abundant and the most visually distinctive in a document.</paragraph>
<caption><location><page_7><loc_9><loc_84><loc_48><loc_89></location>Table 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.</caption>
<paragraph><location><page_7><loc_9><loc_84><loc_49><loc_89></location>Table 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.</paragraph>
<paragraph><location><page_7><loc_52><loc_84><loc_92><loc_89></location>Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.</paragraph>
<table>
<location><page_7><loc_13><loc_63><loc_44><loc_81></location>
<caption>Table 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.</caption>
<caption>Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.</caption>
<row_0><col_0><col_header>Class-count</col_0><col_1><col_header>11</col_1><col_2><col_header>6</col_2><col_3><col_header>5</col_3><col_4><col_header>4</col_4></row_0>
<row_1><col_0><row_header>Caption</col_0><col_1><body>68</col_1><col_2><body>Text</col_2><col_3><body>Text</col_3><col_4><body>Text</col_4></row_1>
<row_2><col_0><row_header>Footnote</col_0><col_1><body>71</col_1><col_2><body>Text</col_2><col_3><body>Text</col_3><col_4><body>Text</col_4></row_2>
@@ -149,15 +150,13 @@
<row_12><col_0><row_header>Overall</col_0><col_1><body>72</col_1><col_2><body>73</col_2><col_3><body>78</col_3><col_4><body>77</col_4></row_12>
</table>
<subtitle-level-1><location><page_7><loc_9><loc_58><loc_21><loc_60></location>Learning Curve</subtitle-level-1>
<paragraph><location><page_7><loc_9><loc_33><loc_48><loc_58></location>One of the fundamental questions related to any dataset is if it is "large enough". To answer this question for DocLayNet, we performed a data ablation study in which we evaluated a Mask R-CNN model trained on increasing fractions of the DocLayNet dataset. As can be seen in Figure 5, the mAP score rises sharply in the beginning and eventually levels out. To estimate the error-bar on the metrics, we ran the training five times on the entire data-set. This resulted in a 1% error-bar, depicted by the shaded area in Figure 5. In the inset of Figure 5, we show the exact same data-points, but with a logarithmic scale on the x-axis. As is expected, the mAP score increases linearly as a function of the data-size in the inset. The curve ultimately flattens out between the 80% and 100% mark, with the 80% mark falling within the error-bars of the 100% mark. This provides a good indication that the model would not improve significantly by yet increasing the data size. Rather, it would probably benefit more from improved data consistency (as discussed in Section 3), data augmentation methods [23], or the addition of more document categories and styles.</paragraph>
<paragraph><location><page_7><loc_9><loc_33><loc_49><loc_58></location>One of the fundamental questions related to any dataset is if it is "large enough". To answer this question for DocLayNet, we performed a data ablation study in which we evaluated a Mask R-CNN model trained on increasing fractions of the DocLayNet dataset. As can be seen in Figure 5, the mAP score rises sharply in the beginning and eventually levels out. To estimate the error-bar on the metrics, we ran the training five times on the entire data-set. This resulted in a 1% error-bar, depicted by the shaded area in Figure 5. In the inset of Figure 5, we show the exact same data-points, but with a logarithmic scale on the x-axis. As is expected, the mAP score increases linearly as a function of the data-size in the inset. The curve ultimately flattens out between the 80% and 100% mark, with the 80% mark falling within the error-bars of the 100% mark. This provides a good indication that the model would not improve significantly by yet increasing the data size. Rather, it would probably benefit more from improved data consistency (as discussed in Section 3), data augmentation methods [23], or the addition of more document categories and styles.</paragraph>
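The fraction-wise training described above amounts to drawing nested subsets of the training pages; the sketch below is an illustrative reconstruction, not the authors' tooling.

```python
# Hedged sketch: draw nested training subsets for a data-ablation study, so the
# 10% subset is contained in the 25% subset, and so on. Names are illustrative.
import random

def ablation_subsets(page_ids, fractions=(0.1, 0.25, 0.5, 0.8, 1.0), seed=0):
    shuffled = list(page_ids)
    random.Random(seed).shuffle(shuffled)
    for frac in fractions:
        yield frac, shuffled[: int(len(shuffled) * frac)]

for frac, subset in ablation_subsets(range(1000)):
    print(f"train on {frac:.0%}: {len(subset)} pages")
```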
<subtitle-level-1><location><page_7><loc_9><loc_30><loc_27><loc_32></location>Impact of Class Labels</subtitle-level-1>
<paragraph><location><page_7><loc_9><loc_11><loc_48><loc_30></location>The choice and number of labels can have a significant effect on the overall model performance. Since PubLayNet, DocBank and DocLayNet all have different label sets, it is of particular interest to understand and quantify this influence of the label set on the model performance. We investigate this by either down-mapping labels into more common ones (e.g. Caption → Text ) or excluding them from the annotations entirely. Furthermore, it must be stressed that all mappings and exclusions were performed on the data before model training. In Table 3, we present the mAP scores for a Mask R-CNN R50 network on different label sets. Where a label is down-mapped, we show its corresponding label, otherwise it was excluded. We present three different label sets, with 6, 5 and 4 different labels respectively. The set of 5 labels contains the same labels as PubLayNet. However, due to the different definition of</paragraph>
<caption><location><page_7><loc_52><loc_84><loc_91><loc_89></location>Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.</caption>
<paragraph><location><page_7><loc_9><loc_11><loc_49><loc_30></location>The choice and number of labels can have a significant effect on the overall model performance. Since PubLayNet, DocBank and DocLayNet all have different label sets, it is of particular interest to understand and quantify this influence of the label set on the model performance. We investigate this by either down-mapping labels into more common ones (e.g. Caption → Text ) or excluding them from the annotations entirely. Furthermore, it must be stressed that all mappings and exclusions were performed on the data before model training. In Table 3, we present the mAP scores for a Mask R-CNN R50 network on different label sets. Where a label is down-mapped, we show its corresponding label, otherwise it was excluded. We present three different label sets, with 6, 5 and 4 different labels respectively. The set of 5 labels contains the same labels as PubLayNet. However, due to the different definition of</paragraph>
<table>
<location><page_7><loc_58><loc_61><loc_86><loc_81></location>
<caption>Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.</caption>
<row_0><col_0><body>Class-count</col_0><col_1><col_header>11</col_1><col_2><col_header>11</col_2><col_3><col_header>5</col_3><col_4><col_header>5</col_4></row_0>
<row_1><col_0><body>Split</col_0><col_1><col_header>Doc</col_1><col_2><col_header>Page</col_2><col_3><col_header>Doc</col_3><col_4><col_header>Page</col_4></row_1>
<location><page_7><loc_58><loc_61><loc_85><loc_81></location>
<row_0><col_0><col_header>Class-count</col_0><col_1><col_header>11</col_1><col_2><col_header>11</col_2><col_3><col_header>5</col_3><col_4><col_header>5</col_4></row_0>
<row_1><col_0><col_header>Split</col_0><col_1><col_header>Doc</col_1><col_2><col_header>Page</col_2><col_3><col_header>Doc</col_3><col_4><col_header>Page</col_4></row_1>
<row_2><col_0><row_header>Caption</col_0><col_1><body>68</col_1><col_2><body>83</col_2><col_3><body></col_3><col_4><body></col_4></row_2>
<row_3><col_0><row_header>Footnote</col_0><col_1><body>71</col_1><col_2><body>84</col_2><col_3><body></col_3><col_4><body></col_4></row_3>
<row_4><col_0><row_header>Formula</col_0><col_1><body>60</col_1><col_2><body>66</col_2><col_3><body></col_3><col_4><body></col_4></row_4>
@@ -171,30 +170,31 @@
<row_12><col_0><row_header>Title</col_0><col_1><body>77</col_1><col_2><body>81</col_2><col_3><body></col_3><col_4><body></col_4></row_12>
<row_13><col_0><row_header>All</col_0><col_1><body>72</col_1><col_2><body>84</col_2><col_3><body>78</col_3><col_4><body>87</col_4></row_13>
</table>
<paragraph><location><page_7><loc_52><loc_47><loc_91><loc_58></location>lists in PubLayNet (grouped list-items) versus DocLayNet (separate list-items), the label set of size 4 is the closest to PubLayNet, in the assumption that the List is down-mapped to Text in PubLayNet. The results in Table 3 show that the prediction accuracy on the remaining class labels does not change significantly when other classes are merged into them. The overall macro-average improves by around 5%, in particular when Page-footer and Page-header are excluded.</paragraph>
<paragraph><location><page_7><loc_52><loc_47><loc_92><loc_58></location>lists in PubLayNet (grouped list-items) versus DocLayNet (separate list-items), the label set of size 4 is the closest to PubLayNet, in the assumption that the List is down-mapped to Text in PubLayNet. The results in Table 3 show that the prediction accuracy on the remaining class labels does not change significantly when other classes are merged into them. The overall macro-average improves by around 5%, in particular when Page-footer and Page-header are excluded.</paragraph>
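A label down-mapping of this kind can be applied to COCO-format annotations before training; the sketch below shows the idea, with an illustrative mapping and placeholder file name rather than the exact sets used in Table 3.

```python
# Hedged sketch: down-map or drop class labels in a COCO annotation file before
# training, e.g. Caption -> Text, and exclude Page-header/Page-footer entirely.
import json

REMAP = {"Caption": "Text", "Footnote": "Text"}  # illustrative mapping
DROP = {"Page-header", "Page-footer"}            # illustrative exclusions

with open("doclaynet_train_coco.json") as f:     # placeholder file name
    coco = json.load(f)

name_to_id = {c["name"]: c["id"] for c in coco["categories"]}
id_to_name = {v: k for k, v in name_to_id.items()}

kept = []
for ann in coco["annotations"]:
    name = id_to_name[ann["category_id"]]
    if name in DROP:
        continue                                 # label excluded entirely
    ann["category_id"] = name_to_id[REMAP.get(name, name)]
    kept.append(ann)
coco["annotations"] = kept
```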
<subtitle-level-1><location><page_7><loc_52><loc_44><loc_90><loc_46></location>Impact of Document Split in Train and Test Set</subtitle-level-1>
<paragraph><location><page_7><loc_52><loc_25><loc_91><loc_44></location>Many documents in DocLayNet have a unique styling. In order to avoid overfitting on a particular style, we have split the train-, test- and validation-sets of DocLayNet on document boundaries, i.e. every document contributes pages to only one set. To the best of our knowledge, this was not considered in PubLayNet or DocBank. To quantify how this affects model performance, we trained and evaluated a Mask R-CNN R50 model on a modified dataset version. Here, the train-, test- and validation-sets were obtained by a randomised draw over the individual pages. As can be seen in Table 4, the difference in model performance is surprisingly large: page-wise splitting gains ~10% in mAP over the document-wise splitting. Thus, random page-wise splitting of DocLayNet can easily lead to accidental overestimation of model performance and should be avoided.</paragraph>
<paragraph><location><page_7><loc_52><loc_25><loc_92><loc_44></location>Many documents in DocLayNet have a unique styling. In order to avoid overfitting on a particular style, we have split the train-, test- and validation-sets of DocLayNet on document boundaries, i.e. every document contributes pages to only one set. To the best of our knowledge, this was not considered in PubLayNet or DocBank. To quantify how this affects model performance, we trained and evaluated a Mask R-CNN R50 model on a modified dataset version. Here, the train-, test- and validation-sets were obtained by a randomised draw over the individual pages. As can be seen in Table 4, the difference in model performance is surprisingly large: page-wise splitting gains ~10% in mAP over the document-wise splitting. Thus, random page-wise splitting of DocLayNet can easily lead to accidental overestimation of model performance and should be avoided.</paragraph>
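The document-boundary split described above corresponds to a grouped split; for example, with scikit-learn (toy data, illustrative only):

```python
# Hedged sketch: split pages on document boundaries so that every document
# contributes pages to only one set. Toy data for illustration.
from sklearn.model_selection import GroupShuffleSplit

pages = list(range(8))                              # one entry per page
doc_ids = ["a", "a", "b", "b", "c", "c", "d", "d"]  # source document per page

splitter = GroupShuffleSplit(n_splits=1, test_size=0.25, random_state=0)
train_idx, test_idx = next(splitter.split(pages, groups=doc_ids))
# no document appears on both sides of the split
```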
<subtitle-level-1><location><page_7><loc_52><loc_22><loc_68><loc_23></location>Dataset Comparison</subtitle-level-1>
<paragraph><location><page_7><loc_52><loc_11><loc_91><loc_21></location>Throughout this paper, we claim that DocLayNet's wider variety of document layouts leads to more robust layout detection models. In Table 5, we provide evidence for that. We trained models on each of the available datasets (PubLayNet, DocBank and DocLayNet) and evaluated them on the test sets of the other datasets. Due to the different label sets and annotation styles, a direct comparison is not possible. Hence, we focussed on the common labels among the datasets. Between PubLayNet and DocLayNet, these are Picture ,</paragraph>
<paragraph><location><page_7><loc_52><loc_11><loc_92><loc_21></location>Throughout this paper, we claim that DocLayNet's wider variety of document layouts leads to more robust layout detection models. In Table 5, we provide evidence for that. We trained models on each of the available datasets (PubLayNet, DocBank and DocLayNet) and evaluated them on the test sets of the other datasets. Due to the different label sets and annotation styles, a direct comparison is not possible. Hence, we focussed on the common labels among the datasets. Between PubLayNet and DocLayNet, these are Picture ,</paragraph>
<paragraph><location><page_8><loc_9><loc_91><loc_91><loc_92></location>KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar</paragraph>
<paragraph><location><page_8><loc_9><loc_81><loc_48><loc_89></location>Table 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.</paragraph>
<table>
<location><page_8><loc_12><loc_57><loc_45><loc_78></location>
<caption>Table 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.</caption>
<row_0><col_0><body></col_0><col_1><col_header>Testing on</col_1><col_2><col_header>Testing on</col_2><col_3><col_header>Testing on</col_3></row_0>
<row_1><col_0><col_header>labels</col_0><col_1><col_header>PLN</col_1><col_2><col_header>DB</col_2><col_3><col_header>DLN</col_3></row_1>
<row_2><col_0><body>Figure</col_0><col_1><body>96</col_1><col_2><body>43</col_2><col_3><body>23</col_3></row_2>
<row_3><col_0><body>Sec-header</col_0><col_1><body>87</col_1><col_2><body>-</col_2><col_3><body>32</col_3></row_3>
<row_4><col_0><body>Table</col_0><col_1><body>95</col_1><col_2><body>24</col_2><col_3><body>49</col_3></row_4>
<row_5><col_0><body>Text</col_0><col_1><body>96</col_1><col_2><body>-</col_2><col_3><body>42</col_3></row_5>
<row_6><col_0><body>total</col_0><col_1><body>93</col_1><col_2><body>34</col_2><col_3><body>30</col_3></row_6>
<row_7><col_0><body>Figure</col_0><col_1><body>77</col_1><col_2><body>71</col_2><col_3><body>31</col_3></row_7>
<row_8><col_0><body>Table</col_0><col_1><body>19</col_1><col_2><body>65</col_2><col_3><body>22</col_3></row_8>
<row_9><col_0><body>total</col_0><col_1><body>48</col_1><col_2><body>68</col_2><col_3><body>27</col_3></row_9>
<row_10><col_0><body>Figure</col_0><col_1><body>67</col_1><col_2><body>51</col_2><col_3><body>72</col_3></row_10>
<row_11><col_0><body>Sec-header</col_0><col_1><body>53</col_1><col_2><body>-</col_2><col_3><body>68</col_3></row_11>
<row_12><col_0><body>Table</col_0><col_1><body>87</col_1><col_2><body>43</col_2><col_3><body>82</col_3></row_12>
<row_13><col_0><body>Text</col_0><col_1><body>77</col_1><col_2><body>-</col_2><col_3><body>84</col_3></row_13>
<row_14><col_0><body>total</col_0><col_1><body>59</col_1><col_2><body>47</col_2><col_3><body>78</col_3></row_14>
<row_0><col_0><body></col_0><col_1><body></col_1><col_2><col_header>Testing on</col_2><col_3><col_header>Testing on</col_3><col_4><col_header>Testing on</col_4></row_0>
<row_1><col_0><col_header>Training on</col_0><col_1><col_header>labels</col_1><col_2><col_header>PLN</col_2><col_3><col_header>DB</col_3><col_4><col_header>DLN</col_4></row_1>
<row_2><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Figure</col_1><col_2><body>96</col_2><col_3><body>43</col_3><col_4><body>23</col_4></row_2>
<row_3><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Sec-header</col_1><col_2><body>87</col_2><col_3><body>-</col_3><col_4><body>32</col_4></row_3>
<row_4><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Table</col_1><col_2><body>95</col_2><col_3><body>24</col_3><col_4><body>49</col_4></row_4>
<row_5><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>Text</col_1><col_2><body>96</col_2><col_3><body>-</col_3><col_4><body>42</col_4></row_5>
<row_6><col_0><row_header>PubLayNet (PLN)</col_0><col_1><row_header>total</col_1><col_2><body>93</col_2><col_3><body>34</col_3><col_4><body>30</col_4></row_6>
<row_7><col_0><row_header>DocBank (DB)</col_0><col_1><row_header>Figure</col_1><col_2><body>77</col_2><col_3><body>71</col_3><col_4><body>31</col_4></row_7>
<row_8><col_0><row_header>DocBank (DB)</col_0><col_1><row_header>Table</col_1><col_2><body>19</col_2><col_3><body>65</col_3><col_4><body>22</col_4></row_8>
<row_9><col_0><row_header>DocBank (DB)</col_0><col_1><row_header>total</col_1><col_2><body>48</col_2><col_3><body>68</col_3><col_4><body>27</col_4></row_9>
<row_10><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Figure</col_1><col_2><body>67</col_2><col_3><body>51</col_3><col_4><body>72</col_4></row_10>
<row_11><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Sec-header</col_1><col_2><body>53</col_2><col_3><body>-</col_3><col_4><body>68</col_4></row_11>
<row_12><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Table</col_1><col_2><body>87</col_2><col_3><body>43</col_3><col_4><body>82</col_4></row_12>
<row_13><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>Text</col_1><col_2><body>77</col_2><col_3><body>-</col_3><col_4><body>84</col_4></row_13>
<row_14><col_0><row_header>DocLayNet (DLN)</col_0><col_1><row_header>total</col_1><col_2><body>59</col_2><col_3><body>47</col_3><col_4><body>78</col_4></row_14>
</table>
<paragraph><location><page_8><loc_9><loc_44><loc_48><loc_51></location>Section-header , Table and Text . Before training, we either mapped or excluded DocLayNet's other labels as specified in table 3, and also PubLayNet's List to Text . Note that the different clustering of lists (by list-element vs. whole list objects) naturally decreases the mAP score for Text .</paragraph>
<paragraph><location><page_8><loc_9><loc_26><loc_48><loc_44></location>For comparison of DocBank with DocLayNet, we trained only on Picture and Table clusters of each dataset. We had to exclude Text because successive paragraphs are often grouped together into a single object in DocBank. This paragraph grouping is incompatible with the individual paragraphs of DocLayNet. As can be seen in Table 5, DocLayNet trained models yield better performance compared to the previous datasets. It is noteworthy that the models trained on PubLayNet and DocBank perform very well on their own test set, but have a much lower performance on the foreign datasets. While this also applies to DocLayNet, the difference is far less pronounced. Thus we conclude that DocLayNet trained models are overall more robust and will produce better results for challenging, unseen layouts.</paragraph>
@@ -218,19 +218,22 @@
<paragraph><location><page_8><loc_52><loc_18><loc_91><loc_21></location>[11] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE Transactions on Pattern Analysis and Machine Intelligence , 39(6):1137-1149, 2017.</paragraph>
<paragraph><location><page_8><loc_52><loc_15><loc_91><loc_18></location>[12] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross B. Girshick. Mask R-CNN. In IEEE International Conference on Computer Vision , ICCV, pages 2980-2988. IEEE Computer Society, Oct 2017.</paragraph>
<paragraph><location><page_8><loc_52><loc_11><loc_91><loc_15></location>[13] Glenn Jocher, Alex Stoken, Ayush Chaurasia, Jirka Borovec, NanoCode012, TaoXie, Yonghye Kwon, Kalen Michael, Liu Changyu, Jiacong Fang, Abhiram V, Laughing, tkianai, yxNONG, Piotr Skalski, Adam Hogan, Jebastin Nadar, imyhxy, Lorenzo Mammana, Alex Wang, Cristi Fati, Diego Montes, Jan Hajek, Laurentiu</paragraph>
<caption><location><page_9><loc_9><loc_36><loc_91><loc_41></location>Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.</caption>
<paragraph><location><page_9><loc_9><loc_91><loc_57><loc_92></location>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis</paragraph>
<paragraph><location><page_9><loc_60><loc_91><loc_91><loc_92></location>KDD '22, August 14-18, 2022, Washington, DC, USA</paragraph>
<caption><location><page_9><loc_9><loc_43><loc_52><loc_44></location>Text Caption List-Item Formula Table Section-Header Picture Page-Header Page-Footer Title</caption>
<figure>
<location><page_9><loc_9><loc_43><loc_91><loc_89></location>
<caption>Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.</caption>
<location><page_9><loc_9><loc_44><loc_91><loc_89></location>
<caption>Text Caption List-Item Formula Table Section-Header Picture Page-Header Page-Footer Title</caption>
</figure>
<paragraph><location><page_9><loc_11><loc_31><loc_48><loc_34></location>Diaconu, Mai Thanh Minh, Marc, albinxavi, fatih, oleg, and wanghao yang. ultralytics/yolov5: v6.0 - yolov5n nano models, roboflow integration, tensorflow export, opencv dnn support, October 2021.</paragraph>
<paragraph><location><page_9><loc_9><loc_28><loc_48><loc_31></location>[14] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. CoRR , abs/2005.12872, 2020.</paragraph>
<paragraph><location><page_9><loc_9><loc_36><loc_91><loc_41></location>Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.</paragraph>
<paragraph><location><page_9><loc_11><loc_31><loc_48><loc_33></location>Diaconu, Mai Thanh Minh, Marc, albinxavi, fatih, oleg, and wanghao yang. ultralytics/yolov5: v6.0 - yolov5n nano models, roboflow integration, tensorflow export, opencv dnn support, October 2021.</paragraph>
<paragraph><location><page_9><loc_9><loc_28><loc_48><loc_30></location>[14] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. CoRR , abs/2005.12872, 2020.</paragraph>
<paragraph><location><page_9><loc_9><loc_26><loc_48><loc_27></location>[15] Mingxing Tan, Ruoming Pang, and Quoc V. Le. Efficientdet: Scalable and efficient object detection. CoRR , abs/1911.09070, 2019.</paragraph>
<paragraph><location><page_9><loc_9><loc_23><loc_48><loc_25></location>[16] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: common objects in context, 2014.</paragraph>
<paragraph><location><page_9><loc_9><loc_21><loc_48><loc_23></location>[17] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detectron2, 2019.</paragraph>
<paragraph><location><page_9><loc_9><loc_21><loc_48><loc_22></location>[17] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detectron2, 2019.</paragraph>
<paragraph><location><page_9><loc_9><loc_16><loc_48><loc_20></location>[18] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter W. J. Staar. Robust pdf document conversion using recurrent neural networks. In Proceedings of the 35th Conference on Artificial Intelligence , AAAI, pages 15137-15145, Feb 2021.</paragraph>
<paragraph><location><page_9><loc_9><loc_10><loc_48><loc_15></location>[19] Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and Ming Zhou. Layoutlm: Pre-training of text and layout for document image understanding. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 1192-1200, New York, USA, 2020. Association for Computing Machinery.</paragraph>
<paragraph><location><page_9><loc_52><loc_32><loc_91><loc_34></location>[20] Shoubin Li, Xuyan Ma, Shuaiqun Pan, Jun Hu, Lin Shi, and Qing Wang. Vtlayout: Fusion of visual and text features for document layout analysis, 2021.</paragraph>
<paragraph><location><page_9><loc_52><loc_32><loc_91><loc_33></location>[20] Shoubin Li, Xuyan Ma, Shuaiqun Pan, Jun Hu, Lin Shi, and Qing Wang. Vtlayout: Fusion of visual and text features for document layout analysis, 2021.</paragraph>
<paragraph><location><page_9><loc_52><loc_29><loc_91><loc_31></location>[21] Peng Zhang, Can Li, Liang Qiao, Zhanzhan Cheng, Shiliang Pu, Yi Niu, and Fei Wu. Vsr: A unified framework for document layout analysis combining vision, semantics and relations, 2021.</paragraph>
<paragraph><location><page_9><loc_52><loc_25><loc_91><loc_28></location>[22] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 774-782. ACM, 2018.</paragraph>
<paragraph><location><page_9><loc_52><loc_23><loc_91><loc_24></location>[23] Connor Shorten and Taghi M. Khoshgoftaar. A survey on image data augmentation for deep learning. Journal of Big Data , 6(1):60, 2019.</paragraph>

File diff suppressed because one or more lines are too long

View File

@@ -18,8 +18,6 @@ Accurate document layout analysis is a key requirement for highquality PDF docum
· Information systems → Document structure ; · Applied computing → Document analysis ; · Computing methodologies → Machine learning ; Computer vision ; Object detection ;
Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).
KDD '22, August 14-18, 2022, Washington, DC, USA © 2022 Copyright held by the owner/author(s). ACM ISBN 978-1-4503-9385-0/22/08. https://doi.org/10.1145/3534678.3539043
Figure 1: Four examples of complex page layouts across different document categories
@@ -31,8 +29,6 @@ PDF document conversion, layout segmentation, object-detection, data set, Machin
## ACM Reference Format:
Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar. 2022. DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22), August 14-18, 2022, Washington, DC, USA. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/3534678.3539043
## 1 INTRODUCTION
Despite the substantial improvements achieved with machine-learning (ML) approaches and deep neural networks in recent years, document conversion remains a challenging problem, as demonstrated by the numerous public competitions held on this topic [1-4]. The challenge originates from the huge variability in PDF documents regarding layout, language and formats (scanned, programmatic or a combination of both). Engineering a single ML model that can be applied on all types of documents and provides high-quality layout segmentation remains to this day extremely challenging [5]. To highlight the variability in document layouts, we show a few example documents from the DocLayNet dataset in Figure 1.
@@ -90,22 +86,26 @@ Despite being cost-intense and far less scalable than automation, human annotati
The annotation campaign was carried out in four phases. In phase one, we identified and prepared the data sources for annotation. In phase two, we determined the class labels and how annotations should be done on the documents in order to obtain maximum consistency. The latter was guided by a detailed requirement analysis and exhaustive experiments. In phase three, we trained the annotation staff and performed exams for quality assurance. In phase four,
KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar
Table 1: DocLayNet dataset overview. Along with the frequency of each class label, we present the relative occurrence (as % of row "Total") in the train, test and validation sets. The inter-annotator agreement is computed as the mAP@0.5-0.95 metric between pairwise annotations from the triple-annotated pages, from which we obtain accuracy ranges.
| | | % of Total | % of Total | % of Total | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) |
|----------------|---------|--------------|--------------|--------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|
| class label | Count | Train | Test | Val | All | Fin | Man | Sci | Law | Pat | Ten |
| Caption | 22524 | 2.04 | 1.77 | 2.32 | 84-89 | 40-61 | 86-92 | 94-99 | 95-99 | 69-78 | n/a |
| Footnote | 6318 | 0.60 | 0.31 | 0.58 | 83-91 | n/a | 100 | 62-88 | 85-94 | n/a | 82-97 |
| Formula | 25027 | 2.25 | 1.90 | 2.96 | 83-85 | n/a | n/a | 84-87 | 86-96 | n/a | n/a |
| List-item | 185660 | 17.19 | 13.34 | 15.82 | 87-88 | 74-83 | 90-92 | 97-97 | 81-85 | 75-88 | 93-95 |
| Page-footer | 70878 | 6.51 | 5.58 | 6.00 | 93-94 | 88-90 | 95-96 | 100 | 92-97 | 100 | 96-98 |
| Page-header | 58022 | 5.10 | 6.70 | 5.06 | 85-89 | 66-76 | 90-94 | 98-100 | 91-92 | 97-99 | 81-86 |
| Picture | 45976 | 4.21 | 2.78 | 5.31 | 69-71 | 56-59 | 82-86 | 69-82 | 80-95 | 66-71 | 59-76 |
| Section-header | 142884 | 12.60 | 15.77 | 12.85 | 83-84 | 76-81 | 90-92 | 94-95 | 87-94 | 69-73 | 78-86 |
| Table | 34733 | 3.20 | 2.27 | 3.60 | 77-81 | 75-80 | 83-86 | 98-99 | 58-80 | 79-84 | 70-85 |
| Text | 510377 | 45.82 | 49.28 | 45.00 | 84-86 | 81-86 | 88-93 | 89-93 | 87-92 | 71-79 | 87-95 |
| Title | 5071 | 0.47 | 0.30 | 0.50 | 60-72 | 24-63 | 50-63 | 94-100 | 82-96 | 68-79 | 24-56 |
| Total | 1107470 | 941123 | 99816 | 66531 | 82-83 | 71-74 | 79-81 | 89-94 | 86-91 | 71-76 | 68-85 |
| | | % of Total | % of Total | % of Total | % of Total | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) | triple inter-annotator mAP @ 0.5-0.95 (%) |
|----------------|---------|--------------|--------------|--------------|--------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|---------------------------------------------|
| class label | Count | Train | Test | Val | All | Fin | Man | Sci | Law | Pat | Ten |
| Caption | 22524 | 2.04 | 1.77 | 2.32 | 84-89 | 40-61 | 86-92 | 94-99 | 95-99 | 69-78 | n/a |
| Footnote | 6318 | 0.60 | 0.31 | 0.58 | 83-91 | n/a | 100 | 62-88 | 85-94 | n/a | 82-97 |
| Formula | 25027 | 2.25 | 1.90 | 2.96 | 83-85 | n/a | n/a | 84-87 | 86-96 | n/a | n/a |
| List-item | 185660 | 17.19 | 13.34 | 15.82 | 87-88 | 74-83 | 90-92 | 97-97 | 81-85 | 75-88 | 93-95 |
| Page-footer | 70878 | 6.51 | 5.58 | 6.00 | 93-94 | 88-90 | 95-96 | 100 | 92-97 | 100 | 96-98 |
| Page-header | 58022 | 5.10 | 6.70 | 5.06 | 85-89 | 66-76 | 90-94 | 98-100 | 91-92 | 97-99 | 81-86 |
| Picture | 45976 | 4.21 | 2.78 | 5.31 | 69-71 | 56-59 | 82-86 | 69-82 | 80-95 | 66-71 | 59-76 |
| Section-header | 142884 | 12.60 | 15.77 | 12.85 | 83-84 | 76-81 | 90-92 | 94-95 | 87-94 | 69-73 | 78-86 |
| Table | 34733 | 3.20 | 2.27 | 3.60 | 77-81 | 75-80 | 83-86 | 98-99 | 58-80 | 79-84 | 70-85 |
| Text | 510377 | 45.82 | 49.28 | 45.00 | 84-86 | 81-86 | 88-93 | 89-93 | 87-92 | 71-79 | 87-95 |
| Title | 5071 | 0.47 | 0.30 | 0.50 | 60-72 | 24-63 | 50-63 | 94-100 | 82-96 | 68-79 | 24-56 |
| Total | 1107470 | 941123 | 99816 | 66531 | 82-83 | 71-74 | 79-81 | 89-94 | 86-91 | 71-76 | 68-85 |
Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.
Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.
<!-- image -->
@@ -120,6 +120,8 @@ Preparation work included uploading and parsing the sourced PDF documents in the
Phase 2: Label selection and guideline. We reviewed the collected documents and identified the most common structural features they exhibit. This was achieved by identifying recurrent layout elements and led us to the definition of 11 distinct class labels. These 11 class labels are Caption , Footnote , Formula , List-item , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Critical factors that were considered for the choice of these class labels were (1) the overall occurrence of the label, (2) the specificity of the label, (3) recognisability on a single page (i.e. no need for context from previous or next page) and (4) overall coverage of the page. Specificity ensures that the choice of label is not ambiguous, while coverage ensures that all meaningful items on a page can be annotated. We refrained from class labels that are very specific to a document category, such as Abstract in the Scientific Articles category. We also avoided class labels that are tightly linked to the semantics of the text. Labels such as Author and Affiliation , as seen in DocBank, are often only distinguishable by discriminating on
DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis
the textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category.
At first sight, the task of visual document-layout interpretation appears intuitive enough to obtain plausible annotations in most cases. However, during early trial-runs in the core team, we observed many cases in which annotators use different annotation styles, especially for documents with challenging layouts. For example, if a figure is presented with subfigures, one annotator might draw a single figure bounding-box, while another might annotate each subfigure separately. The same applies for lists, where one might annotate all list items in one block or each list item separately. In essence, we observed that challenging layouts would be annotated in different but plausible ways. To illustrate this, we show in Figure 4 multiple examples of plausible but inconsistent annotations on the same pages.
@@ -186,6 +188,10 @@ In this section, we will present several aspects related to the performance of o
In Table 2, we present baseline experiments (given in mAP) on Mask R-CNN [12], Faster R-CNN [11], and YOLOv5 [13]. Both training and evaluation were performed on RGB images with dimensions of 1025 × 1025 pixels. For training, we only used one annotation in case of redundantly annotated pages. As one can observe, the variation in mAP between the models is rather low, but overall between 6 and 10% lower than the mAP computed from the pairwise human annotations on triple-annotated pages. This gives a good indication that the DocLayNet dataset poses a worthwhile challenge for the research community to close the gap between human recognition and ML approaches. It is interesting to see that Mask R-CNN and Faster R-CNN produce very comparable mAP scores, indicating that pixel-based image segmentation derived from bounding-boxes does not help to obtain better predictions. On the other hand, the more recent Yolov5x model does very well and even out-performs humans on selected labels such as Text , Table and Picture . This is not entirely surprising, as Text , Table and Picture are abundant and the most visually distinctive in a document.
Table 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.
Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.
Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.
| Class-count | 11 | 6 | 5 | 4 |
|----------------|------|---------|---------|---------|
| Caption | 68 | Text | Text | Text |
@@ -209,7 +215,7 @@ One of the fundamental questions related to any dataset is if it is "large enoug
The choice and number of labels can have a significant effect on the overall model performance. Since PubLayNet, DocBank and DocLayNet all have different label sets, it is of particular interest to understand and quantify this influence of the label set on the model performance. We investigate this by either down-mapping labels into more common ones (e.g. Caption → Text ) or excluding them from the annotations entirely. Furthermore, it must be stressed that all mappings and exclusions were performed on the data before model training. In Table 3, we present the mAP scores for a Mask R-CNN R50 network on different label sets. Where a label is down-mapped, we show its corresponding label, otherwise it was excluded. We present three different label sets, with 6, 5 and 4 different labels respectively. The set of 5 labels contains the same labels as PubLayNet. However, due to the different definition of
Table 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in ~10% point improvement.
| Class-count | 11 | 11 | 5 | 5 |
|----------------|------|------|-----|------|
| Split | Doc | Page | Doc | Page |
@@ -236,25 +242,27 @@ Many documents in DocLayNet have a unique styling. In order to avoid overfitting
Throughout this paper, we claim that DocLayNet's wider variety of document layouts leads to more robust layout detection models. In Table 5, we provide evidence for that. We trained models on each of the available datasets (PubLayNet, DocBank and DocLayNet) and evaluated them on the test sets of the other datasets. Due to the different label sets and annotation styles, a direct comparison is not possible. Hence, we focussed on the common labels among the datasets. Between PubLayNet and DocLayNet, these are Picture ,
KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar
Table 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.
Table 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.
| | Testing on | Testing on | Testing on |
|------------|--------------|--------------|--------------|
| labels | PLN | DB | DLN |
| Figure | 96 | 43 | 23 |
| Sec-header | 87 | - | 32 |
| Table | 95 | 24 | 49 |
| Text | 96 | - | 42 |
| total | 93 | 34 | 30 |
| Figure | 77 | 71 | 31 |
| Table | 19 | 65 | 22 |
| total | 48 | 68 | 27 |
| Figure | 67 | 51 | 72 |
| Sec-header | 53 | - | 68 |
| Table | 87 | 43 | 82 |
| Text | 77 | - | 84 |
| total | 59 | 47 | 78 |
| | | Testing on | Testing on | Testing on |
|-----------------|------------|--------------|--------------|--------------|
| Training on | labels | PLN | DB | DLN |
| PubLayNet (PLN) | Figure | 96 | 43 | 23 |
| PubLayNet (PLN) | Sec-header | 87 | - | 32 |
| PubLayNet (PLN) | Table | 95 | 24 | 49 |
| PubLayNet (PLN) | Text | 96 | - | 42 |
| PubLayNet (PLN) | total | 93 | 34 | 30 |
| DocBank (DB) | Figure | 77 | 71 | 31 |
| DocBank (DB) | Table | 19 | 65 | 22 |
| DocBank (DB) | total | 48 | 68 | 27 |
| DocLayNet (DLN) | Figure | 67 | 51 | 72 |
| DocLayNet (DLN) | Sec-header | 53 | - | 68 |
| DocLayNet (DLN) | Table | 87 | 43 | 82 |
| DocLayNet (DLN) | Text | 77 | - | 84 |
| DocLayNet (DLN) | total | 59 | 47 | 78 |
Section-header , Table and Text . Before training, we either mapped or excluded DocLayNet's other labels as specified in table 3, and also PubLayNet's List to Text . Note that the different clustering of lists (by list-element vs. whole list objects) naturally decreases the mAP score for Text .
@@ -300,9 +308,15 @@ To date, there is still a significant gap between human and ML accuracy on the l
[13] Glenn Jocher, Alex Stoken, Ayush Chaurasia, Jirka Borovec, NanoCode012, TaoXie, Yonghye Kwon, Kalen Michael, Liu Changyu, Jiacong Fang, Abhiram V, Laughing, tkianai, yxNONG, Piotr Skalski, Adam Hogan, Jebastin Nadar, imyhxy, Lorenzo Mammana, Alex Wang, Cristi Fati, Diego Montes, Jan Hajek, Laurentiu
Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.
DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis
KDD '22, August 14-18, 2022, Washington, DC, USA
Text Caption List-Item Formula Table Section-Header Picture Page-Header Page-Footer Title
<!-- image -->
Figure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.
Diaconu, Mai Thanh Minh, Marc, albinxavi, fatih, oleg, and wanghao yang. ultralytics/yolov5: v6.0 - yolov5n nano models, roboflow integration, tensorflow export, opencv dnn support, October 2021.
[14] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. CoRR , abs/2005.12872, 2020.

File diff suppressed because one or more lines are too long

View File

@@ -4,14 +4,14 @@
<paragraph><location><page_1><loc_22><loc_68><loc_79><loc_77></location>We have chosen the PubTabNet data set to perform HPO, since it includes a highly diverse set of tables. Also we report TED scores separately for simple and complex tables (tables with cell spans). Results are presented in Table 1. It is evident that with OTSL, our model achieves the same TED score and slightly better mAP scores in comparison to HTML. However, OTSL yields a 2x speed up in the inference runtime over HTML.</paragraph>
<caption><location><page_1><loc_22><loc_59><loc_79><loc_66></location>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption>
<table>
<location><page_1><loc_23><loc_41><loc_78><loc_57></location>
<location><page_1><loc_23><loc_41><loc_78><loc_58></location>
<caption>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption>
<row_0><col_0><col_header>#</col_0><col_1><col_header>#</col_1><col_2><col_header>Language</col_2><col_3><col_header>TEDs</col_3><col_4><col_header>TEDs</col_4><col_5><col_header>TEDs</col_5><col_6><col_header>mAP</col_6><col_7><col_header>Inference</col_7></row_0>
<row_1><col_0><col_header>enc-layers</col_0><col_1><col_header>dec-layers</col_1><col_2><col_header>Language</col_2><col_3><col_header>simple</col_3><col_4><col_header>complex</col_4><col_5><col_header>all</col_5><col_6><col_header>(0.75)</col_6><col_7><col_header>time (secs)</col_7></row_1>
<row_2><col_0><body>6</col_0><col_1><body>6</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.965 0.969</col_3><col_4><body>0.934 0.927</col_4><col_5><body>0.955 0.955</col_5><col_6><body>0.88 0.857</col_6><col_7><body>2.73 5.39</col_7></row_2>
<row_3><col_0><body>4</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.938 0.952</col_3><col_4><body>0.904</col_4><col_5><body>0.927</col_5><col_6><body>0.853</col_6><col_7><body>1.97</col_7></row_3>
<row_4><col_0><body></col_0><col_1><body></col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.923</col_3><col_4><body>0.909 0.897 0.901</col_4><col_5><body>0.938 0.915</col_5><col_6><body>0.843</col_6><col_7><body>3.77</col_7></row_4>
<row_5><col_0><body>2</col_0><col_1><body>4</col_1><col_2><body></col_2><col_3><body>0.945</col_3><col_4><body></col_4><col_5><body>0.931</col_5><col_6><body>0.859 0.834</col_6><col_7><body>1.91 3.81</col_7></row_5>
<row_3><col_0><body>4</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.938 0.952</col_3><col_4><body>0.904</col_4><col_5><body>0.927 0.938</col_5><col_6><body>0.853</col_6><col_7><body>1.97</col_7></row_3>
<row_4><col_0><body></col_0><col_1><body></col_1><col_2><body>HTML</col_2><col_3><body>0.923</col_3><col_4><body>0.909 0.897 0.901</col_4><col_5><body>0.915</col_5><col_6><body>0.843</col_6><col_7><body>3.77</col_7></row_4>
<row_5><col_0><body>2</col_0><col_1><body>4</col_1><col_2><body>OTSL</col_2><col_3><body>0.945</col_3><col_4><body></col_4><col_5><body>0.931</col_5><col_6><body>0.859 0.834</col_6><col_7><body>1.91 3.81</col_7></row_5>
<row_6><col_0><body>4</col_0><col_1><body>2</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.952 0.944</col_3><col_4><body>0.92 0.903</col_4><col_5><body>0.942 0.931</col_5><col_6><body>0.857 0.824</col_6><col_7><body>1.22 2</col_7></row_6>
</table>
<subtitle-level-1><location><page_1><loc_22><loc_35><loc_43><loc_36></location>5.2 Quantitative Results</subtitle-level-1>

File diff suppressed because one or more lines are too long

View File

@@ -9,9 +9,9 @@ Table 1. HPO performed in OTSL and HTML representation on the same transformer-b
|------------|------------|------------|-------------|-------------------|-------------|-------------|-------------|
| enc-layers | dec-layers | Language | simple | complex | all | (0.75) | time (secs) |
| 6 | 6 | OTSL HTML | 0.965 0.969 | 0.934 0.927 | 0.955 0.955 | 0.88 0.857 | 2.73 5.39 |
| 4 | 4 | OTSL HTML | 0.938 0.952 | 0.904 | 0.927 | 0.853 | 1.97 |
| | | OTSL HTML | 0.923 | 0.909 0.897 0.901 | 0.938 0.915 | 0.843 | 3.77 |
| 2 | 4 | | 0.945 | | 0.931 | 0.859 0.834 | 1.91 3.81 |
| 4 | 4 | OTSL HTML | 0.938 0.952 | 0.904 | 0.927 0.938 | 0.853 | 1.97 |
| | | HTML | 0.923 | 0.909 0.897 0.901 | 0.915 | 0.843 | 3.77 |
| 2 | 4 | OTSL | 0.945 | | 0.931 | 0.859 0.834 | 1.91 3.81 |
| 4 | 2 | OTSL HTML | 0.952 0.944 | 0.92 0.903 | 0.942 0.931 | 0.857 0.824 | 1.22 2 |
## 5.2 Quantitative Results

File diff suppressed because one or more lines are too long

View File

@@ -2,23 +2,25 @@
<subtitle-level-1><location><page_1><loc_22><loc_81><loc_79><loc_85></location>Optimized Table Tokenization for Table Structure Recognition</subtitle-level-1>
<paragraph><location><page_1><loc_23><loc_74><loc_78><loc_79></location>Maksym Lysak [0000-0002-3723-6960], Ahmed Nassar [0000-0002-9468-0822], Nikolaos Livathinos [0000-0001-8513-3491], Christoph Auer [0000-0001-5761-0422], and Peter Staar [0000-0002-8088-0823]</paragraph>
<paragraph><location><page_1><loc_36><loc_70><loc_64><loc_73></location>IBM Research {mly,ahn,nli,cau,taa}@zurich.ibm.com</paragraph>
<paragraph><location><page_1><loc_26><loc_41><loc_74><loc_66></location>Abstract. Extracting tables from documents is a crucial task in any document conversion pipeline. Recently, transformer-based models have demonstrated that table-structure can be recognized with impressive accuracy using Image-to-Markup-Sequence (Im2Seq) approaches. Taking only the image of a table, such models predict a sequence of tokens (e.g. in HTML, LaTeX) which represent the structure of the table. Since the token representation of the table structure has a significant impact on the accuracy and run-time performance of any Im2Seq model, we investigate in this paper how table-structure representation can be optimised. We propose a new, optimised table-structure language (OTSL) with a minimized vocabulary and specific rules. The benefits of OTSL are that it reduces the number of tokens to 5 (HTML needs 28+) and shortens the sequence length to half of HTML on average. Consequently, model accuracy improves significantly, inference time is halved compared to HTML-based models, and the predicted table structures are always syntactically correct. This in turn eliminates most post-processing needs. Popular table structure data-sets will be published in OTSL format to the community.</paragraph>
<paragraph><location><page_1><loc_27><loc_41><loc_74><loc_66></location>Abstract. Extracting tables from documents is a crucial task in any document conversion pipeline. Recently, transformer-based models have demonstrated that table-structure can be recognized with impressive accuracy using Image-to-Markup-Sequence (Im2Seq) approaches. Taking only the image of a table, such models predict a sequence of tokens (e.g. in HTML, LaTeX) which represent the structure of the table. Since the token representation of the table structure has a significant impact on the accuracy and run-time performance of any Im2Seq model, we investigate in this paper how table-structure representation can be optimised. We propose a new, optimised table-structure language (OTSL) with a minimized vocabulary and specific rules. The benefits of OTSL are that it reduces the number of tokens to 5 (HTML needs 28+) and shortens the sequence length to half of HTML on average. Consequently, model accuracy improves significantly, inference time is halved compared to HTML-based models, and the predicted table structures are always syntactically correct. This in turn eliminates most post-processing needs. Popular table structure data-sets will be published in OTSL format to the community.</paragraph>
<paragraph><location><page_1><loc_27><loc_37><loc_74><loc_40></location>Keywords: Table Structure Recognition · Data Representation · Transformers · Optimization.</paragraph>
<subtitle-level-1><location><page_1><loc_22><loc_33><loc_37><loc_34></location>1 Introduction</subtitle-level-1>
<paragraph><location><page_1><loc_22><loc_21><loc_79><loc_31></location>Tables are ubiquitous in documents such as scientific papers, patents, reports, manuals, specification sheets or marketing material. They often encode highly valuable information and therefore need to be extracted with high accuracy. Unfortunately, tables appear in documents in various sizes, styling and structure, making it difficult to recover their correct structure with simple analytical methods. Therefore, accurate table extraction is achieved these days with machine-learning based methods.</paragraph>
<paragraph><location><page_1><loc_22><loc_16><loc_79><loc_20></location>In modern document understanding systems [1,15], table extraction is typically a two-step process. Firstly, every table on a page is located with a bounding box, and secondly, their logical row and column structure is recognized. As of</paragraph>
<paragraph><location><page_2><loc_22><loc_75><loc_79><loc_84></location>Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table-example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key-features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and an enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).</paragraph>
<caption><location><page_2><loc_22><loc_75><loc_79><loc_84></location>Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table-example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key-features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and an enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).</caption>
<figure>
<location><page_2><loc_25><loc_46><loc_76><loc_74></location>
<location><page_2><loc_24><loc_46><loc_76><loc_74></location>
<caption>Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table-example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key-features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and an enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).</caption>
</figure>
<paragraph><location><page_2><loc_22><loc_34><loc_79><loc_43></location>today, table detection in documents is a well understood problem, and the latest state-of-the-art (SOTA) object detection methods provide an accuracy comparable to human observers [7,8,10,14,23]. On the other hand, the problem of table structure recognition (TSR) is a lot more challenging and remains a very active area of research, in which many novel machine learning algorithms are being explored [3,4,5,9,11,12,13,14,17,18,21,22].</paragraph>
<paragraph><location><page_2><loc_22><loc_16><loc_79><loc_34></location>Recently emerging SOTA methods for table structure recognition employ transformer-based models, in which an image of the table is provided to the network in order to predict the structure of the table as a sequence of tokens. These image-to-sequence (Im2Seq) models are extremely powerful, since they allow for a purely data-driven solution. The tokens of the sequence typically belong to a markup language such as HTML, LaTeX or Markdown, which allow describing table structure as rows, columns and spanning cells in various configurations. In Figure 1, we illustrate how HTML is used to represent the table-structure of a particular example table. Public table-structure data sets such as PubTabNet [22] and FinTabNet [21], which were created in a semi-automated way from paired PDF and HTML sources (e.g. PubMed Central), popularized primarily the use of HTML as ground-truth representation format for TSR.</paragraph>
<paragraph><location><page_3><loc_22><loc_73><loc_79><loc_85></location>While the majority of research in TSR is currently focused on the development and application of novel neural model architectures, the table structure representation language (e.g. HTML in PubTabNet and FinTabNet) is usually adopted as is for the sequence tokenization in Im2Seq models. In this paper, we aim for the opposite and investigate the impact of the table structure representation language with an otherwise unmodified Im2Seq transformer-based architecture. Since the current state-of-the-art Im2Seq model is TableFormer [9], we select this model to perform our experiments.</paragraph>
<paragraph><location><page_3><loc_22><loc_58><loc_79><loc_73></location>The main contribution of this paper is the introduction of a new optimised table structure language (OTSL), specifically designed to describe table-structure in a compact and structured way for Im2Seq models. OTSL has a number of key features, which make it very attractive to use in Im2Seq models. Specifically, compared to other languages such as HTML, OTSL has a minimized vocabulary which yields short sequence length, strong inherent structure (e.g. strict rectangular layout) and a strict syntax with rules that only look backwards. The latter allows for syntax validation during inference and ensures a syntactically correct table-structure. These OTSL features are illustrated in Figure 1, in comparison to HTML.</paragraph>
<paragraph><location><page_3><loc_22><loc_44><loc_79><loc_58></location>The paper is structured as follows. In section 2, we give an overview of the latest developments in table-structure reconstruction. In section 3 we review the current HTML table encoding (popularised by PubTabNet and FinTabNet) and discuss its flaws. Subsequently, we introduce OTSL in section 4, which includes the language definition, syntax rules and error-correction procedures. In section 5, we apply OTSL on the TableFormer architecture, compare it to TableFormer models trained on HTML and ultimately demonstrate the advantages of using OTSL. Finally, in section 6 we conclude our work and outline next potential steps.</paragraph>
<paragraph><location><page_3><loc_22><loc_45><loc_79><loc_58></location>The paper is structured as follows. In section 2, we give an overview of the latest developments in table-structure reconstruction. In section 3 we review the current HTML table encoding (popularised by PubTabNet and FinTabNet) and discuss its flaws. Subsequently, we introduce OTSL in section 4, which includes the language definition, syntax rules and error-correction procedures. In section 5, we apply OTSL on the TableFormer architecture, compare it to TableFormer models trained on HTML and ultimately demonstrate the advantages of using OTSL. Finally, in section 6 we conclude our work and outline next potential steps.</paragraph>
<subtitle-level-1><location><page_3><loc_22><loc_40><loc_39><loc_42></location>2 Related Work</subtitle-level-1>
<paragraph><location><page_3><loc_22><loc_16><loc_79><loc_38></location>Approaches to formalize the logical structure and layout of tables in electronic documents date back more than two decades [16]. In the recent past, a wide variety of computer vision methods have been explored to tackle the problem of table structure recognition, i.e. the correct identification of columns, rows and spanning cells in a given table. Broadly speaking, the current deep-learning based approaches fall into three categories: object detection (OD) methods, Graph-Neural-Network (GNN) methods and Image-to-Markup-Sequence (Im2Seq) methods. Object-detection based methods [11,12,13,14,21] rely on table-structure annotation using (overlapping) bounding boxes for training, and produce bounding-box predictions to define table cells, rows, and columns on a table image. Graph Neural Network (GNN) based methods [3,6,17,18], as the name suggests, represent tables as graph structures. The graph nodes represent the content of each table cell, an embedding vector from the table image, or geometric coordinates of the table cell. The edges of the graph define the relationship between the nodes, e.g. if they belong to the same column, row, or table cell.</paragraph>
<paragraph><location><page_4><loc_22><loc_67><loc_79><loc_85></location>Other work [20] aims at predicting a grid for each table and deciding which cells must be merged using an attention network. Im2Seq methods cast the problem as a sequence generation task [4,5,9,22], and therefore need an internal table-structure representation language, which is often implemented with standard markup languages (e.g. HTML, LaTeX, Markdown). In theory, Im2Seq methods have a natural advantage over the OD and GNN methods by virtue of directly predicting the table-structure. As such, no post-processing or rules are needed in order to obtain the table-structure, which is necessary with OD and GNN approaches. In practice, this is not entirely true, because a predicted sequence of table-structure markup does not necessarily have to be syntactically correct. Hence, depending on the quality of the predicted sequence, some post-processing needs to be performed to ensure a syntactically valid (let alone correct) sequence.</paragraph>
<paragraph><location><page_4><loc_22><loc_39><loc_79><loc_67></location>Within the Im2Seq method, we find several popular models, namely the encoder-dual-decoder model (EDD) [22], TableFormer [9], TabSplitter [2] and Ye et al. [19]. EDD uses two consecutive long short-term memory (LSTM) decoders to predict a table in HTML representation. The tag decoder predicts a sequence of HTML tags. For each decoded table cell ( <td> ), the attention is passed to the cell decoder to predict the content with an embedded OCR approach. The latter makes it susceptible to transcription errors in the cell content of the table. TableFormer addresses this reliance on OCR and uses two transformer decoders for HTML structure and cell bounding box prediction in an end-to-end architecture. The predicted cell bounding box is then used to extract text tokens from an originating (digital) PDF page, circumventing any need for OCR. TabSplitter [2] proposes a compact double-matrix representation of table rows and columns to do error detection and error correction of HTML structure sequences based on predictions from [19]. This compact double-matrix representation cannot be used directly for Im2Seq model training, so the model uses HTML as an intermediate form. Chi et al. [4] introduce a data set and a baseline method using bidirectional LSTMs to predict LaTeX code. Kayal [5] introduces Gated ResNet transformers to predict LaTeX code, and a separate OCR module to extract content.</paragraph>
<paragraph><location><page_4><loc_22><loc_26><loc_79><loc_38></location>Im2Seq approaches have been shown to be well-suited for the TSR task and allow a full end-to-end network design that can output the final table structure without pre- or post-processing logic. Furthermore, Im2Seq models have demonstrated state-of-the-art prediction accuracy [9]. This motivated the authors to investigate if the performance (both in accuracy and inference time) can be further improved by optimising the table structure representation language. We believe this is a necessary step before further improving neural network architectures for this task.</paragraph>
@ -33,6 +35,7 @@
<paragraph><location><page_5><loc_22><loc_33><loc_79><loc_54></location>Obviously, HTML and other general-purpose markup languages were not designed for Im2Seq models. As such, they have some serious drawbacks. First, the token vocabulary needs to be artificially large in order to describe all plausible tabular structures. Since most Im2Seq models use an autoregressive approach, they generate the sequence token by token. Therefore, to reduce inference time, a shorter sequence length is critical. Every table-cell is represented by at least two tokens ( <td> and </td> ). Furthermore, when tokenizing the HTML structure, one needs to explicitly enumerate possible column-spans and row-spans as words. In practice, this ends up requiring 28 different HTML tokens (when including column- and row-spans up to 10 cells) just to describe every table in the PubTabNet dataset. Clearly, not every token is equally represented, as is depicted in Figure 2. This skewed distribution of tokens in combination with variable token row-length makes it challenging for models to learn the HTML structure.</paragraph>
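To make the vocabulary-size drawback concrete, here is a minimal illustration (plain Python, no docling or TableFormer API assumed) of the HTML structure tokens an autoregressive Im2Seq model must emit for a trivial 2x2 table:

```python
# HTML structure tokens for a plain 2x2 table, emitted one at a time
# by an autoregressive Im2Seq model (cell content omitted).
html_tokens = [
    "<table>",
    "<tr>", "<td>", "</td>", "<td>", "</td>", "</tr>",
    "<tr>", "<td>", "</td>", "<td>", "</td>", "</tr>",
    "</table>",
]
print(len(html_tokens))  # 14 structure tokens for only 4 cells

# Spanning cells require dedicated tokens such as '<td rowspan="2">';
# enumerating row- and column-spans up to 10 cells is what drives the
# PubTabNet structure vocabulary to 28 tokens.
```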
<paragraph><location><page_5><loc_22><loc_27><loc_79><loc_32></location>Additionally, it would be desirable if the representation would easily allow an early detection of invalid sequences on-the-go, before the prediction of the entire table structure is completed. HTML is not well-suited for this purpose as the verification of incomplete sequences is non-trivial or even impossible.</paragraph>
<paragraph><location><page_5><loc_22><loc_16><loc_79><loc_26></location>In a valid HTML table, the token sequence must describe a 2D grid of table cells, serialised in row-major ordering, where each row and each column have the same length (while considering row- and column-spans). Furthermore, every opening tag in HTML needs to be matched by a closing tag in a correct hierarchical manner. Since the number of tokens for each table row and column can vary significantly, especially for large tables with many row- and column-spans, it is complex to verify the consistency of predicted structures during sequence</paragraph>
<paragraph><location><page_6><loc_22><loc_82><loc_79><loc_85></location>generation. Implicitly, this also means that Im2Seq models need to learn these complex syntax rules, simply to deliver valid output.</paragraph>
<paragraph><location><page_6><loc_22><loc_63><loc_79><loc_82></location>In practice, we observe two major issues with prediction quality when training Im2Seq models on HTML table structure generation from images. On the one hand, we find that on large tables, the visual attention of the model often starts to drift and is not accurately moving forward cell by cell anymore. This manifests itself either in an increasing location drift for proposed table-cells in later rows of the same column, or even in a complete loss of vertical alignment, as illustrated in Figure 5. Addressing this with post-processing is partially possible, but clearly undesired. On the other hand, we find many instances of predictions with structural inconsistencies or plain invalid HTML output, as shown in Figure 6, which are nearly impossible to properly correct. Both problems seriously impact the TSR model performance, since they reflect not only in the task of pure structure recognition but also in the equally crucial recognition or matching of table cell content.</paragraph>
<subtitle-level-1><location><page_6><loc_22><loc_58><loc_61><loc_60></location>4 Optimised Table Structure Language</subtitle-level-1>
@ -44,54 +47,58 @@
<paragraph><location><page_6><loc_23><loc_27><loc_79><loc_29></location>-"L" cell left-looking cell , merging with the left neighbor cell to create a span</paragraph>
<paragraph><location><page_6><loc_23><loc_23><loc_79><loc_26></location>-"U" cell up-looking cell , merging with the upper neighbor cell to create a span</paragraph>
<paragraph><location><page_6><loc_23><loc_22><loc_74><loc_23></location>-"X" cell cross cell , to merge with both left and upper neighbor cells</paragraph>
<paragraph><location><page_6><loc_23><loc_20><loc_54><loc_22></location>-"NL" new-line , switch to the next row.</paragraph>
<paragraph><location><page_6><loc_23><loc_20><loc_54><loc_21></location>-"NL" new-line , switch to the next row.</paragraph>
<paragraph><location><page_6><loc_22><loc_16><loc_79><loc_19></location>A notable attribute of OTSL is that it has the capability of achieving lossless conversion to HTML.</paragraph>
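As an illustration of this lossless mapping, the sketch below (a hypothetical helper, not part of docling's API) expands each "C" cell of a syntactically valid OTSL sequence into HTML structure tags with the appropriate row- and column-spans:

```python
def otsl_to_html(tokens):
    """Convert an OTSL token sequence into HTML structure tags.

    Sketch of the lossless conversion; assumes a syntactically valid,
    rectangular sequence over {"C", "L", "U", "X", "NL"}.
    """
    rows, row = [], []
    for t in tokens:
        if t == "NL":
            rows.append(row)
            row = []
        else:
            row.append(t)

    html = ["<table>"]
    for i, r in enumerate(rows):
        html.append("<tr>")
        for j, t in enumerate(r):
            if t != "C":
                continue  # merged cells are emitted by their top-left "C"
            colspan = 1  # "L" cells to the right merge into this cell
            while j + colspan < len(r) and r[j + colspan] == "L":
                colspan += 1
            rowspan = 1  # "U"/"X" cells below merge into this cell
            while i + rowspan < len(rows) and rows[i + rowspan][j] in ("U", "X"):
                rowspan += 1
            attrs = "" if colspan == 1 else f' colspan="{colspan}"'
            attrs += "" if rowspan == 1 else f' rowspan="{rowspan}"'
            html += [f"<td{attrs}>", "</td>"]
        html.append("</tr>")
    html.append("</table>")
    return html

# A 2x2 grid whose first row is one cell spanning both columns:
print("".join(otsl_to_html(["C", "L", "NL", "C", "C", "NL"])))
# <table><tr><td colspan="2"></td></tr><tr><td></td><td></td></tr></table>
```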
<caption><location><page_7><loc_22><loc_80><loc_79><loc_84></location>Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding</caption>
<paragraph><location><page_7><loc_22><loc_80><loc_79><loc_84></location>Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding</paragraph>
<figure>
<location><page_7><loc_27><loc_65><loc_73><loc_79></location>
<caption>Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding</caption>
</figure>
<subtitle-level-1><location><page_7><loc_22><loc_60><loc_40><loc_62></location>4.2 Language Syntax</subtitle-level-1>
<subtitle-level-1><location><page_7><loc_22><loc_60><loc_40><loc_61></location>4.2 Language Syntax</subtitle-level-1>
<paragraph><location><page_7><loc_22><loc_58><loc_59><loc_59></location>The OTSL representation follows these syntax rules:</paragraph>
<paragraph><location><page_7><loc_23><loc_54><loc_79><loc_56></location>1. Left-looking cell rule : The left neighbour of an "L" cell must be either another "L" cell or a "C" cell.</paragraph>
<paragraph><location><page_7><loc_23><loc_51><loc_79><loc_53></location>2. Up-looking cell rule : The upper neighbour of a "U" cell must be either another "U" cell or a "C" cell.</paragraph>
<subtitle-level-1><location><page_7><loc_23><loc_49><loc_37><loc_50></location>3. Cross cell rule :</subtitle-level-1>
<paragraph><location><page_7><loc_24><loc_44><loc_79><loc_50></location>: The left neighbour of an "X" cell must be either another "X" cell or a "U" cell, and the upper neighbour of an "X" cell must be either another "X" cell or an "L" cell.</paragraph>
<paragraph><location><page_7><loc_23><loc_50><loc_79><loc_53></location>2. Up-looking cell rule : The upper neighbour of a "U" cell must be either another "U" cell or a "C" cell.</paragraph>
<paragraph><location><page_7><loc_23><loc_49><loc_37><loc_50></location>3. Cross cell rule :</paragraph>
<paragraph><location><page_7><loc_25><loc_44><loc_79><loc_49></location>The left neighbour of an "X" cell must be either another "X" cell or a "U" cell, and the upper neighbour of an "X" cell must be either another "X" cell or an "L" cell.</paragraph>
<paragraph><location><page_7><loc_23><loc_43><loc_78><loc_44></location>4. First row rule : Only "L" cells and "C" cells are allowed in the first row.</paragraph>
<paragraph><location><page_7><loc_23><loc_40><loc_79><loc_43></location>5. First column rule : Only "U" cells and "C" cells are allowed in the first column.</paragraph>
<paragraph><location><page_7><loc_23><loc_37><loc_79><loc_40></location>6. Rectangular rule : The table representation is always rectangular - all rows must have an equal number of tokens, terminated with "NL" token.</paragraph>
<paragraph><location><page_7><loc_22><loc_19><loc_79><loc_35></location>The application of these rules gives OTSL a set of unique properties. First of all, OTSL enforces a strictly rectangular structure representation, where every new-line token starts a new row. As a consequence, all rows and all columns have exactly the same number of tokens, irrespective of cell spans. Secondly, the OTSL representation is unambiguous: every table structure is represented in one way. In this representation every table cell corresponds to a "C"-cell token, which in case of spans is always located in the top-left corner of the table cell definition. Third, OTSL syntax rules are only backward-looking. As a consequence, every predicted token can be validated directly during sequence generation by looking at the previously predicted sequence. As such, OTSL can guarantee that every predicted sequence is syntactically valid.</paragraph>
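For illustration, such a backward-looking check fits in a few lines of plain Python (a sketch, not a docling API; token names as defined above). It decides whether a candidate token may legally extend an already-predicted prefix:

```python
def is_valid_next(prefix, token):
    """Check whether `token` may follow the predicted `prefix`.

    Minimal sketch of OTSL's backward-looking syntax rules; the first
    completed row fixes the rectangular width of the table.
    """
    rows, cur = [], []  # completed rows and the currently open row
    for t in prefix:
        if t == "NL":
            rows.append(cur)
            cur = []
        else:
            cur.append(t)
    width = len(rows[0]) if rows else None

    if token == "NL":
        # Rectangular rule: every row ends at the width of row one.
        return bool(cur) and (width is None or len(cur) == width)
    if width is not None and len(cur) >= width:
        return False  # row is already full; only "NL" may follow
    row, col = len(rows), len(cur)
    if row == 0 and token in ("U", "X"):
        return False  # first-row rule: only "L" and "C" allowed
    if col == 0 and token in ("L", "X"):
        return False  # first-column rule: only "U" and "C" allowed
    left = cur[col - 1] if col else None
    up = rows[row - 1][col] if row else None
    if token == "L":
        return left in ("L", "C")  # left-looking cell rule
    if token == "U":
        return up in ("U", "C")  # up-looking cell rule
    if token == "X":
        return left in ("X", "U") and up in ("X", "L")  # cross cell rule
    return token == "C"  # "C" may appear anywhere in an open row
```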
<paragraph><location><page_7><loc_22><loc_16><loc_79><loc_19></location>These characteristics can be easily learned by sequence generator networks, as we demonstrate further below. We find strong indications that this pattern</paragraph>
<paragraph><location><page_8><loc_22><loc_82><loc_79><loc_85></location>reduces significantly the column drift seen in the HTML based models (see Figure 5).</paragraph>
<subtitle-level-1><location><page_8><loc_22><loc_78><loc_52><loc_80></location>4.3 Error-detection and -mitigation</subtitle-level-1>
<paragraph><location><page_8><loc_22><loc_62><loc_79><loc_77></location>The design of OTSL allows a table structure to be validated easily on an unfinished sequence. The detection of an invalid sequence token is a clear indication of a prediction mistake; however, a valid sequence by itself does not guarantee prediction correctness. Different heuristics can be used to correct token errors in an invalid sequence and thus increase the chances for accurate predictions. Such heuristics can be applied either after the prediction of each token, or at the end, on the entire predicted sequence. For example, a simple heuristic which can correct the predicted OTSL sequence on-the-fly is to verify if the token with the highest prediction confidence invalidates the predicted sequence, and replace it with the token with the next highest confidence until the OTSL rules are satisfied.</paragraph>
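Building on the `is_valid_next` sketch above, the on-the-fly variant of this heuristic could look as follows (`probs` stands in for the model's per-token confidences at the current decoding step; again a sketch, not a docling API):

```python
def pick_token(prefix, probs):
    """Greedy decoding step with on-the-fly syntax correction.

    If the most confident token would invalidate the OTSL sequence,
    fall back to the next most confident one until the rules hold.
    """
    for token, _ in sorted(probs.items(), key=lambda kv: -kv[1]):
        if is_valid_next(prefix, token):
            return token
    raise ValueError("no token can legally extend the prefix")

# "L" is most confident here, but a row may not start with it; the
# up-neighbour "C" permits "U", the next best choice:
print(pick_token(["C", "C", "NL"], {"L": 0.6, "U": 0.3, "C": 0.1}))  # U
```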
<subtitle-level-1><location><page_8><loc_22><loc_58><loc_37><loc_60></location>5 Experiments</subtitle-level-1>
<subtitle-level-1><location><page_8><loc_22><loc_58><loc_38><loc_60></location>5 Experiments</subtitle-level-1>
<paragraph><location><page_8><loc_22><loc_43><loc_79><loc_56></location>To evaluate the impact of OTSL on prediction accuracy and inference times, we conducted a series of experiments based on the TableFormer model (Figure 4) with two objectives: Firstly we evaluate the prediction quality and performance of OTSL vs. HTML after performing Hyper Parameter Optimization (HPO) on the canonical PubTabNet data set. Secondly we pick the best hyper-parameters found in the first step and evaluate how OTSL impacts the performance of TableFormer after training on other publicly available data sets (FinTabNet, PubTables-1M [14]). The ground truth (GT) from all data sets has been converted into OTSL format for this purpose, and will be made publicly available.</paragraph>
<caption><location><page_8><loc_22><loc_36><loc_79><loc_39></location>Fig. 4. Architecture sketch of the TableFormer model, which is a representative for the Im2Seq approach.</caption>
<paragraph><location><page_8><loc_22><loc_36><loc_79><loc_39></location>Fig. 4. Architecture sketch of the TableFormer model, which is a representative for the Im2Seq approach.</paragraph>
<caption><location><page_8><loc_22><loc_16><loc_79><loc_22></location>We rely on standard metrics such as Tree Edit Distance score (TEDs) for table structure prediction, and Mean Average Precision (mAP) with 0.75 Intersection Over Union (IOU) threshold for the bounding-box predictions of table cells. The predicted OTSL structures were converted back to HTML format in</caption>
<figure>
<location><page_8><loc_23><loc_25><loc_77><loc_36></location>
<caption>Fig. 4. Architecture sketch of the TableFormer model, which is a representative for the Im2Seq approach.</caption>
<caption>We rely on standard metrics such as Tree Edit Distance score (TEDs) for table structure prediction, and Mean Average Precision (mAP) with 0.75 Intersection Over Union (IOU) threshold for the bounding-box predictions of table cells. The predicted OTSL structures were converted back to HTML format in</caption>
</figure>
<paragraph><location><page_8><loc_22><loc_16><loc_79><loc_22></location>We rely on standard metrics such as Tree Edit Distance score (TEDs) for table structure prediction, and Mean Average Precision (mAP) with 0.75 Intersection Over Union (IOU) threshold for the bounding-box predictions of table cells. The predicted OTSL structures were converted back to HTML format in</paragraph>
<paragraph><location><page_9><loc_22><loc_81><loc_79><loc_85></location>order to compute the TED score. Inference timing results for all experiments were obtained from the same machine on a single core with AMD EPYC 7763 CPU @2.45 GHz.</paragraph>
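As a side note on the metric itself, a TEDS-style score can be reproduced with any tree edit distance implementation. The sketch below (structure only, cell content ignored) uses the third-party zss package and the normalisation from Zhong et al. [22], 1 - TED / max(|T_pred|, |T_gt|):

```python
from zss import Node, simple_distance  # pip install zss

def tree_size(node):
    return 1 + sum(tree_size(c) for c in node.children)

def teds(pred, gt):
    """Tree-Edit-Distance-based Similarity, structure-only sketch."""
    return 1.0 - simple_distance(pred, gt) / max(tree_size(pred), tree_size(gt))

# Toy HTML structure trees: the prediction misses one cell.
gt = Node("table", [Node("tr", [Node("td"), Node("td")])])
pred = Node("table", [Node("tr", [Node("td")])])
print(teds(pred, gt))  # 0.75 -- one deletion over a 4-node tree
```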
<subtitle-level-1><location><page_9><loc_22><loc_77><loc_52><loc_79></location>5.1 Hyper Parameter Optimization</subtitle-level-1>
<subtitle-level-1><location><page_9><loc_22><loc_78><loc_52><loc_79></location>5.1 Hyper Parameter Optimization</subtitle-level-1>
<paragraph><location><page_9><loc_22><loc_68><loc_79><loc_77></location>We have chosen the PubTabNet data set to perform HPO, since it includes a highly diverse set of tables. We also report TED scores separately for simple and complex tables (tables with cell spans). Results are presented in Table 1. It is evident that with OTSL, our model achieves the same TED score and slightly better mAP scores in comparison to HTML. However, OTSL yields a 2x speed-up in inference runtime over HTML.</paragraph>
<caption><location><page_9><loc_22><loc_59><loc_79><loc_65></location>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption>
<table>
<location><page_9><loc_23><loc_41><loc_78><loc_57></location>
<location><page_9><loc_23><loc_41><loc_78><loc_58></location>
<caption>Table 1. HPO performed in OTSL and HTML representation on the same transformer-based TableFormer [9] architecture, trained only on PubTabNet [22]. Effects of reducing the # of layers in encoder and decoder stages of the model show that smaller models trained on OTSL perform better, especially in recognizing complex table structures, and maintain a much higher mAP score than the HTML counterpart.</caption>
<row_0><col_0><col_header>#</col_0><col_1><col_header>#</col_1><col_2><col_header>Language</col_2><col_3><col_header>TEDs</col_3><col_4><col_header>TEDs</col_4><col_5><col_header>TEDs</col_5><col_6><col_header>mAP</col_6><col_7><col_header>Inference</col_7></row_0>
<row_1><col_0><col_header>enc-layers</col_0><col_1><col_header>dec-layers</col_1><col_2><col_header>Language</col_2><col_3><col_header>simple</col_3><col_4><col_header>complex</col_4><col_5><col_header>all</col_5><col_6><col_header>(0.75)</col_6><col_7><col_header>time (secs)</col_7></row_1>
<row_2><col_0><body>6</col_0><col_1><body>6</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.965 0.969</col_3><col_4><body>0.934 0.927</col_4><col_5><body>0.955 0.955</col_5><col_6><body>0.88 0.857</col_6><col_7><body>2.73 5.39</col_7></row_2>
<row_3><col_0><body>4</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.938 0.952</col_3><col_4><body>0.904 0.909</col_4><col_5><body>0.927</col_5><col_6><body>0.853</col_6><col_7><body>1.97</col_7></row_3>
<row_4><col_0><body>2</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.923</col_3><col_4><body>0.897 0.901</col_4><col_5><body>0.938 0.915</col_5><col_6><body>0.843</col_6><col_7><body>3.77</col_7></row_4>
<row_5><col_0><body></col_0><col_1><body></col_1><col_2><body></col_2><col_3><body>0.945</col_3><col_4><body></col_4><col_5><body>0.931</col_5><col_6><body>0.859 0.834</col_6><col_7><body>1.91 3.81</col_7></row_5>
<row_3><col_0><body>4</col_0><col_1><body>4</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.938 0.952</col_3><col_4><body>0.904 0.909</col_4><col_5><body>0.927 0.938</col_5><col_6><body>0.853</col_6><col_7><body>1.97</col_7></row_3>
<row_4><col_0><body>2</col_0><col_1><body></col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.923</col_3><col_4><body>0.897 0.901</col_4><col_5><body>0.915</col_5><col_6><body>0.843</col_6><col_7><body>3.77</col_7></row_4>
<row_5><col_0><body></col_0><col_1><body>4</col_1><col_2><body></col_2><col_3><body>0.945</col_3><col_4><body></col_4><col_5><body>0.931</col_5><col_6><body>0.859 0.834</col_6><col_7><body>1.91 3.81</col_7></row_5>
<row_6><col_0><body>4</col_0><col_1><body>2</col_1><col_2><body>OTSL HTML</col_2><col_3><body>0.952 0.944</col_3><col_4><body>0.92 0.903</col_4><col_5><body>0.942 0.931</col_5><col_6><body>0.857 0.824</col_6><col_7><body>1.22 2</col_7></row_6>
</table>
<subtitle-level-1><location><page_9><loc_22><loc_35><loc_43><loc_36></location>5.2 Quantitative Results</subtitle-level-1>
<paragraph><location><page_9><loc_22><loc_22><loc_79><loc_34></location>We picked the model parameter configuration that produced the best prediction quality (enc=6, dec=6, heads=8) with PubTabNet alone, then independently trained and evaluated it on three publicly available data sets: PubTabNet (395k samples), FinTabNet (113k samples) and PubTables-1M (about 1M samples). Performance results are presented in Table 2. It is clearly evident that the model trained on OTSL outperforms its HTML counterpart across the board, keeping high TEDs and mAP scores even on difficult financial tables (FinTabNet) that contain sparse and large tables.</paragraph>
<paragraph><location><page_9><loc_22><loc_16><loc_79><loc_22></location>Additionally, the results show that OTSL has an advantage over HTML when applied on a bigger data set like PubTables-1M and achieves significantly improved scores. Finally, OTSL achieves faster inference due to fewer decoding steps which is a result of the reduced sequence representation.</paragraph>
<caption><location><page_10><loc_22><loc_82><loc_79><loc_86></location>Table 2. TSR and cell detection results compared between OTSL and HTML on the PubTabNet [22], FinTabNet [21] and PubTables-1M [14] data sets using TableFormer [9] (with enc=6, dec=6, heads=8).</caption>
<caption><location><page_10><loc_22><loc_82><loc_79><loc_85></location>Table 2. TSR and cell detection results compared between OTSL and HTML on the PubTabNet [22], FinTabNet [21] and PubTables-1M [14] data sets using TableFormer [9] (with enc=6, dec=6, heads=8).</caption>
<table>
<location><page_10><loc_23><loc_67><loc_77><loc_80></location>
<caption>Table 2. TSR and cell detection results compared between OTSL and HTML on the PubTabNet [22], FinTabNet [21] and PubTables-1M [14] data sets using TableFormer [9] (with enc=6, dec=6, heads=8).</caption>
@ -108,29 +115,30 @@
<paragraph><location><page_10><loc_22><loc_54><loc_79><loc_61></location>To illustrate the qualitative differences between OTSL and HTML, Figure 5 demonstrates less overlap and more accurate bounding boxes with OTSL. In Figure 6, OTSL proves to be more effective in handling tables with longer token sequences, resulting in even more precise structure prediction and bounding boxes.</paragraph>
<caption><location><page_10><loc_22><loc_44><loc_79><loc_50></location>Fig. 5. The OTSL model produces more accurate bounding boxes with less overlap (E) than the HTML model (D), when predicting the structure of a sparse table (A), at twice the inference speed because of shorter sequence length (B),(C). "PMC2807444_006_00.png" PubTabNet.</caption>
<figure>
<location><page_10><loc_27><loc_16><loc_74><loc_44></location>
<location><page_10><loc_26><loc_16><loc_74><loc_44></location>
<caption>Fig. 5. The OTSL model produces more accurate bounding boxes with less overlap (E) than the HTML model (D), when predicting the structure of a sparse table (A), at twice the inference speed because of shorter sequence length (B),(C). "PMC2807444_006_00.png" PubTabNet.</caption>
</figure>
<caption><location><page_11><loc_22><loc_77><loc_79><loc_84></location>Fig. 6. Visualization of predicted structure and detected bounding boxes on a complex table with many rows. The OTSL model (B) captured the repeating pattern of horizontally merged cells from the GT (A), unlike the HTML model (C). The HTML model also didn't complete the HTML sequence correctly and displayed a lot more drift and overlap of bounding boxes. "PMC5406406_003_01.png" PubTabNet.</caption>
<caption><location><page_11><loc_22><loc_78><loc_79><loc_84></location>Fig. 6. Visualization of predicted structure and detected bounding boxes on a complex table with many rows. The OTSL model (B) captured the repeating pattern of horizontally merged cells from the GT (A), unlike the HTML model (C). The HTML model also didn't complete the HTML sequence correctly and displayed a lot more drift and overlap of bounding boxes. "PMC5406406_003_01.png" PubTabNet.</caption>
<figure>
<location><page_11><loc_27><loc_20><loc_73><loc_77></location>
<caption>Fig. 6. Visualization of predicted structure and detected bounding boxes on a complex table with many rows. The OTSL model (B) captured the repeating pattern of horizontally merged cells from the GT (A), unlike the HTML model (C). The HTML model also didn't complete the HTML sequence correctly and displayed a lot more drift and overlap of bounding boxes. "PMC5406406_003_01.png" PubTabNet.</caption>
</figure>
<subtitle-level-1><location><page_12><loc_22><loc_84><loc_36><loc_85></location>6 Conclusion</subtitle-level-1>
<paragraph><location><page_12><loc_22><loc_74><loc_79><loc_82></location>We demonstrated that representing tables in HTML for the task of table structure recognition with Im2Seq models is ill-suited and has serious limitations. Furthermore, we presented in this paper an Optimized Table Structure Language (OTSL) which, when compared to commonly used general purpose languages, has several key benefits.</paragraph>
<paragraph><location><page_12><loc_22><loc_74><loc_79><loc_81></location>We demonstrated that representing tables in HTML for the task of table structure recognition with Im2Seq models is ill-suited and has serious limitations. Furthermore, we presented in this paper an Optimized Table Structure Language (OTSL) which, when compared to commonly used general purpose languages, has several key benefits.</paragraph>
<paragraph><location><page_12><loc_22><loc_59><loc_79><loc_74></location>First and foremost, given the same network configuration, inference time for a table-structure prediction is about 2 times faster compared to the conventional HTML approach. This is primarily owed to the shorter sequence length of the OTSL representation. Additional performance benefits can be obtained with HPO (hyper parameter optimization). As we demonstrate in our experiments, models trained on OTSL can be significantly smaller, e.g. by reducing the number of encoder and decoder layers, while preserving comparatively good prediction quality. This can further improve inference performance, yielding 5-6 times faster inference speed in OTSL with prediction quality comparable to models trained on HTML (see Table 1).</paragraph>
<paragraph><location><page_12><loc_22><loc_41><loc_79><loc_59></location>Secondly, OTSL has more inherent structure and a significantly restricted vocabulary size. This allows autoregressive models to perform better in the TED metric, but especially with regard to prediction accuracy of the table-cell bounding boxes (see Table 2). As shown in Figure 5, we observe that OTSL drastically reduces the drift for table cell bounding boxes at high row count and in sparse tables. This leads to more accurate predictions and a significant reduction in post-processing complexity, which is an undesired necessity in HTML-based Im2Seq models. Significant novelty lies in the OTSL syntactical rules, which are few, simple and always backward-looking. Each new token can be validated only by analyzing the sequence of previous tokens, without requiring the entire sequence to detect mistakes. This in turn allows structural error detection and correction to be performed on-the-fly during sequence generation.</paragraph>
<subtitle-level-1><location><page_12><loc_22><loc_36><loc_32><loc_38></location>References</subtitle-level-1>
<paragraph><location><page_12><loc_23><loc_29><loc_79><loc_34></location>1. Auer, C., Dolfi, M., Carvalho, A., Ramis, C.B., Staar, P.W.J.: Delivering document conversion as a cloud service with high throughput and responsiveness. CoRR abs/2206.00785 (2022). https://doi.org/10.48550/arXiv.2206.00785 , https://doi.org/10.48550/arXiv.2206.00785</paragraph>
<paragraph><location><page_12><loc_23><loc_23><loc_79><loc_29></location>2. Chen, B., Peng, D., Zhang, J., Ren, Y., Jin, L.: Complex table structure recognition in the wild using transformer and identity matrix-based augmentation. In: Porwal, U., Fornés, A., Shafait, F. (eds.) Frontiers in Handwriting Recognition. pp. 545-561. Springer International Publishing, Cham (2022)</paragraph>
<paragraph><location><page_12><loc_23><loc_23><loc_79><loc_28></location>2. Chen, B., Peng, D., Zhang, J., Ren, Y., Jin, L.: Complex table structure recognition in the wild using transformer and identity matrix-based augmentation. In: Porwal, U., Fornés, A., Shafait, F. (eds.) Frontiers in Handwriting Recognition. pp. 545-561. Springer International Publishing, Cham (2022)</paragraph>
<paragraph><location><page_12><loc_23><loc_20><loc_79><loc_23></location>3. Chi, Z., Huang, H., Xu, H.D., Yu, H., Yin, W., Mao, X.L.: Complicated table structure recognition. arXiv preprint arXiv:1908.04729 (2019)</paragraph>
<paragraph><location><page_12><loc_23><loc_16><loc_79><loc_20></location>4. Deng, Y., Rosenberg, D., Mann, G.: Challenges in end-to-end neural scientific table recognition. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 894-901. IEEE (2019)</paragraph>
<paragraph><location><page_13><loc_23><loc_81><loc_79><loc_85></location>5. Kayal, P., Anand, M., Desai, H., Singh, M.: Tables to latex: structure and content extraction from scientific tables. International Journal on Document Analysis and Recognition (IJDAR) pp. 1-10 (2022)</paragraph>
<paragraph><location><page_13><loc_23><loc_76><loc_79><loc_81></location>6. Lee, E., Kwon, J., Yang, H., Park, J., Lee, S., Koo, H.I., Cho, N.I.: Table structure recognition based on grid shape graph. In: 2022 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC). pp. 1868-1873. IEEE (2022)</paragraph>
<paragraph><location><page_13><loc_23><loc_73><loc_79><loc_75></location>7. Li, M., Cui, L., Huang, S., Wei, F., Zhou, M., Li, Z.: Tablebank: A benchmark dataset for table detection and recognition (2019)</paragraph>
<paragraph><location><page_13><loc_23><loc_66><loc_79><loc_73></location>8. Livathinos, N., Berrospi, C., Lysak, M., Kuropiatnyk, V., Nassar, A., Carvalho, A., Dolfi, M., Auer, C., Dinkla, K., Staar, P.: Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence 35 (17), 15137-15145 (May 2021), https://ojs.aaai.org/index.php/AAAI/article/view/17777</paragraph>
<paragraph><location><page_13><loc_23><loc_66><loc_79><loc_72></location>8. Livathinos, N., Berrospi, C., Lysak, M., Kuropiatnyk, V., Nassar, A., Carvalho, A., Dolfi, M., Auer, C., Dinkla, K., Staar, P.: Robust pdf document conversion using recurrent neural networks. Proceedings of the AAAI Conference on Artificial Intelligence 35 (17), 15137-15145 (May 2021), https://ojs.aaai.org/index.php/AAAI/article/view/17777</paragraph>
<paragraph><location><page_13><loc_23><loc_62><loc_79><loc_66></location>9. Nassar, A., Livathinos, N., Lysak, M., Staar, P.: Tableformer: Table structure understanding with transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4614-4623 (June 2022)</paragraph>
<paragraph><location><page_13><loc_22><loc_53><loc_79><loc_61></location>10. Pfitzmann, B., Auer, C., Dolfi, M., Nassar, A.S., Staar, P.W.J.: Doclaynet: A large human-annotated dataset for document-layout segmentation. In: Zhang, A., Rangwala, H. (eds.) KDD '22: The 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, Washington, DC, USA, August 14 - 18, 2022. pp. 3743-3751. ACM (2022). https://doi.org/10.1145/3534678.3539043 , https://doi.org/10.1145/3534678.3539043</paragraph>
<paragraph><location><page_13><loc_22><loc_48><loc_79><loc_53></location>11. Prasad, D., Gadpal, A., Kapadni, K., Visave, M., Sultanpure, K.: Cascadetabnet: An approach for end to end table detection and structure recognition from image-based documents. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 572-573 (2020)</paragraph>
@ -140,10 +148,11 @@
<paragraph><location><page_13><loc_22><loc_23><loc_79><loc_31></location>15. Staar, P.W.J., Dolfi, M., Auer, C., Bekas, C.: Corpus conversion service: A machine learning platform to ingest documents at scale. In: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. pp. 774-782. KDD '18, Association for Computing Machinery, New York, NY, USA (2018). https://doi.org/10.1145/3219819.3219834 , https://doi.org/10.1145/3219819.3219834</paragraph>
<paragraph><location><page_13><loc_22><loc_20><loc_79><loc_23></location>16. Wang, X.: Tabular Abstraction, Editing, and Formatting. Ph.D. thesis, CAN (1996), aAINN09397</paragraph>
<paragraph><location><page_13><loc_22><loc_16><loc_79><loc_20></location>17. Xue, W., Li, Q., Tao, D.: Res2tim: Reconstruct syntactic structures from table images. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 749-755. IEEE (2019)</paragraph>
<paragraph><location><page_14><loc_22><loc_81><loc_79><loc_85></location>18. Xue, W., Yu, B., Wang, W., Tao, D., Li, Q.: Tgrnet: A table graph reconstruction network for table structure recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1295-1304 (2021)</paragraph>
<paragraph><location><page_14><loc_22><loc_76><loc_79><loc_81></location>19. Ye, J., Qi, X., He, Y., Chen, Y., Gu, D., Gao, P., Xiao, R.: Pingan-vcgroup's solution for icdar 2021 competition on scientific literature parsing task b: Table recognition to html (2021). https://doi.org/10.48550/ARXIV.2105.01848 , https://arxiv.org/abs/2105.01848</paragraph>
<paragraph><location><page_14><loc_22><loc_73><loc_79><loc_75></location>20. Zhang, Z., Zhang, J., Du, J., Wang, F.: Split, embed and merge: An accurate table structure recognizer. Pattern Recognition 126 , 108565 (2022)</paragraph>
<paragraph><location><page_14><loc_22><loc_66><loc_79><loc_73></location>21. Zheng, X., Burdick, D., Popa, L., Zhong, X., Wang, N.X.R.: Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. In: 2021 IEEE Winter Conference on Applications of Computer Vision (WACV). pp. 697-706 (2021). https://doi.org/10.1109/WACV48630.2021.00074</paragraph>
<paragraph><location><page_14><loc_22><loc_66><loc_79><loc_72></location>21. Zheng, X., Burdick, D., Popa, L., Zhong, X., Wang, N.X.R.: Global table extractor (gte): A framework for joint table identification and cell structure recognition using visual context. In: 2021 IEEE Winter Conference on Applications of Computer Vision (WACV). pp. 697-706 (2021). https://doi.org/10.1109/WACV48630.2021.00074</paragraph>
<paragraph><location><page_14><loc_22><loc_60><loc_79><loc_66></location>22. Zhong, X., ShafieiBavani, E., Jimeno Yepes, A.: Image-based table recognition: Data, model, and evaluation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.M. (eds.) Computer Vision - ECCV 2020. pp. 564-580. Springer International Publishing, Cham (2020)</paragraph>
<paragraph><location><page_14><loc_22><loc_56><loc_79><loc_60></location>23. Zhong, X., Tang, J., Yepes, A.J.: Publaynet: largest dataset ever for document layout analysis. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 1015-1022. IEEE (2019)</paragraph>
</document>

File diff suppressed because one or more lines are too long


@ -14,7 +14,7 @@ Tables are ubiquitous in documents such as scientific papers, patents, reports,
In modern document understanding systems [1,15], table extraction is typically a two-step process. Firstly, every table on a page is located with a bounding box, and secondly, their logical row and column structure is recognized. As of
Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table-example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key-features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and an enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).
Fig. 1. Comparison between HTML and OTSL table structure representation: (A) table-example with complex row and column headers, including a 2D empty span, (B) minimal graphical representation of table structure using rectangular layout, (C) HTML representation, (D) OTSL representation. This example demonstrates many of the key-features of OTSL, namely its reduced vocabulary size (12 versus 5 in this case), its reduced sequence length (55 versus 30) and an enhanced internal structure (variable token sequence length per row in HTML versus a fixed length of rows in OTSL).
<!-- image -->
@ -33,6 +33,8 @@ The paper is structured as follows. In section 2, we give an overview of the lat
Approaches to formalize the logical structure and layout of tables in electronic documents date back more than two decades [16]. In the recent past, a wide variety of computer vision methods have been explored to tackle the problem of table structure recognition, i.e. the correct identification of columns, rows and spanning cells in a given table. Broadly speaking, the current deep-learning based approaches fall into three categories: object detection (OD) methods, Graph-Neural-Network (GNN) methods and Image-to-Markup-Sequence (Im2Seq) methods. Object-detection based methods [11,12,13,14,21] rely on table-structure annotation using (overlapping) bounding boxes for training, and produce bounding-box predictions to define table cells, rows, and columns on a table image. Graph Neural Network (GNN) based methods [3,6,17,18], as the name suggests, represent tables as graph structures. The graph nodes represent the content of each table cell, an embedding vector from the table image, or geometric coordinates of the table cell. The edges of the graph define the relationship between the nodes, e.g. if they belong to the same column, row, or table cell.
Other work [20] aims at predicting a grid for each table and deciding which cells must be merged using an attention network. Im2Seq methods cast the problem as a sequence generation task [4,5,9,22], and therefore need an internal table-structure representation language, which is often implemented with standard markup languages (e.g. HTML, LaTeX, Markdown). In theory, Im2Seq methods have a natural advantage over the OD and GNN methods by virtue of directly predicting the table-structure. As such, no post-processing or rules are needed in order to obtain the table-structure, which is necessary with OD and GNN approaches. In practice, this is not entirely true, because a predicted sequence of table-structure markup does not necessarily have to be syntactically correct. Hence, depending on the quality of the predicted sequence, some post-processing needs to be performed to ensure a syntactically valid (let alone correct) sequence.
Within the Im2Seq method, we find several popular models, namely the encoder-dual-decoder model (EDD) [22], TableFormer [9], TabSplitter [2] and Ye et al. [19]. EDD uses two consecutive long short-term memory (LSTM) decoders to predict a table in HTML representation. The tag decoder predicts a sequence of HTML tags. For each decoded table cell ( <td> ), the attention is passed to the cell decoder to predict the content with an embedded OCR approach. The latter makes it susceptible to transcription errors in the cell content of the table. TableFormer addresses this reliance on OCR and uses two transformer decoders for HTML structure and cell bounding box prediction in an end-to-end architecture. The predicted cell bounding box is then used to extract text tokens from an originating (digital) PDF page, circumventing any need for OCR. TabSplitter [2] proposes a compact double-matrix representation of table rows and columns to do error detection and error correction of HTML structure sequences based on predictions from [19]. This compact double-matrix representation cannot be used directly for Im2Seq model training, so the model uses HTML as an intermediate form. Chi et al. [4] introduce a data set and a baseline method using bidirectional LSTMs to predict LaTeX code. Kayal [5] introduces Gated ResNet transformers to predict LaTeX code, and a separate OCR module to extract content.
@ -54,6 +56,8 @@ Additionally, it would be desirable if the representation would easily allow an
In a valid HTML table, the token sequence must describe a 2D grid of table cells, serialised in row-major ordering, where each row and each column have the same length (while considering row- and column-spans). Furthermore, every opening tag in HTML needs to be matched by a closing tag in a correct hierarchical manner. Since the number of tokens for each table row and column can vary significantly, especially for large tables with many row- and column-spans, it is complex to verify the consistency of predicted structures during sequence
generation. Implicitly, this also means that Im2Seq models need to learn these complex syntax rules, simply to deliver valid output.
In practice, we observe two major issues with prediction quality when training Im2Seq models on HTML table structure generation from images. On the one hand, we find that on large tables, the visual attention of the model often starts to drift and is not accurately moving forward cell by cell anymore. This manifests itself either in an increasing location drift for proposed table-cells in later rows of the same column, or even in a complete loss of vertical alignment, as illustrated in Figure 5. Addressing this with post-processing is partially possible, but clearly undesired. On the other hand, we find many instances of predictions with structural inconsistencies or plain invalid HTML output, as shown in Figure 6, which are nearly impossible to properly correct. Both problems seriously impact the TSR model performance, since they reflect not only in the task of pure structure recognition but also in the equally crucial recognition or matching of table cell content.
@ -80,6 +84,8 @@ The OTSL vocabulary is comprised of the following tokens:
A notable attribute of OTSL is that it has the capability of achieving lossless conversion to HTML.
Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding
Fig. 3. OTSL description of table structure: A - table example; B - graphical representation of table structure; C - mapping structure on a grid; D - OTSL structure encoding; E - explanation on cell encoding
<!-- image -->
@ -91,9 +97,9 @@ The OTSL representation follows these syntax rules:
2. Up-looking cell rule : The upper neighbour of a "U" cell must be either another "U" cell or a "C" cell.
## 3. Cross cell rule :
3. Cross cell rule :
: The left neighbour of an "X" cell must be either another "X" cell or a "U" cell, and the upper neighbour of an "X" cell must be either another "X" cell or an "L" cell.
The left neighbour of an "X" cell must be either another "X" cell or a "U" cell, and the upper neighbour of an "X" cell must be either another "X" cell or an "L" cell.
4. First row rule : Only "L" cells and "C" cells are allowed in the first row.
@ -105,6 +111,8 @@ The application of these rules gives OTSL a set of unique properties. First of a
These characteristics can be easily learned by sequence generator networks, as we demonstrate further below. We find strong indications that this pattern
reduces significantly the column drift seen in the HTML based models (see Figure 5).
## 4.3 Error-detection and -mitigation
@ -116,9 +124,9 @@ The design of OTSL allows to validate a table structure easily on an unfinished
To evaluate the impact of OTSL on prediction accuracy and inference times, we conducted a series of experiments based on the TableFormer model (Figure 4) with two objectives: Firstly we evaluate the prediction quality and performance of OTSL vs. HTML after performing Hyper Parameter Optimization (HPO) on the canonical PubTabNet data set. Secondly we pick the best hyper-parameters found in the first step and evaluate how OTSL impacts the performance of TableFormer after training on other publicly available data sets (FinTabNet, PubTables-1M [14]). The ground truth (GT) from all data sets has been converted into OTSL format for this purpose, and will be made publicly available.
Fig. 4. Architecture sketch of the TableFormer model, which is a representative for the Im2Seq approach.
<!-- image -->
We rely on standard metrics such as Tree Edit Distance score (TEDs) for table structure prediction, and Mean Average Precision (mAP) with 0.75 Intersection Over Union (IOU) threshold for the bounding-box predictions of table cells. The predicted OTSL structures were converted back to HTML format in
<!-- image -->
order to compute the TED score. Inference timing results for all experiments were obtained from the same machine on a single core with AMD EPYC 7763 CPU @2.45 GHz.
@ -131,9 +139,9 @@ Table 1. HPO performed in OTSL and HTML representation on the same transformer-b
|------------|------------|------------|-------------|-------------|-------------|-------------|-------------|
| enc-layers | dec-layers | Language | simple | complex | all | (0.75) | time (secs) |
| 6 | 6 | OTSL HTML | 0.965 0.969 | 0.934 0.927 | 0.955 0.955 | 0.88 0.857 | 2.73 5.39 |
| 4 | 4 | OTSL HTML | 0.938 0.952 | 0.904 0.909 | 0.927 | 0.853 | 1.97 |
| 2 | 4 | OTSL HTML | 0.923 | 0.897 0.901 | 0.938 0.915 | 0.843 | 3.77 |
| | | | 0.945 | | 0.931 | 0.859 0.834 | 1.91 3.81 |
| 4 | 4 | OTSL HTML | 0.938 0.952 | 0.904 0.909 | 0.927 0.938 | 0.853 | 1.97 |
| 2 | | OTSL HTML | 0.923 | 0.897 0.901 | 0.915 | 0.843 | 3.77 |
| | 4 | | 0.945 | | 0.931 | 0.859 0.834 | 1.91 3.81 |
| 4 | 2 | OTSL HTML | 0.952 0.944 | 0.92 0.903 | 0.942 0.931 | 0.857 0.824 | 1.22 2 |
## 5.2 Quantitative Results
@ -142,6 +150,12 @@ We picked the model parameter configuration that produced the best prediction qu
Additionally, the results show that OTSL has an advantage over HTML when applied on a bigger data set like PubTables-1M and achieves significantly improved scores. Finally, OTSL achieves faster inference due to fewer decoding steps which is a result of the reduced sequence representation.
Table 2. TSR and cell detection results compared between OTSL and HTML on the PubTabNet [22], FinTabNet [21] and PubTables-1M [14] data sets using TableFormer [9] (with enc=6, dec=6, heads=8).
| | Language | TEDs | TEDs | TEDs | mAP(0.75) | Inference time (secs) |
|--------------|------------|--------|---------|--------|-------------|-------------------------|
@ -167,6 +181,8 @@ Fig. 5. The OTSL model produces more accurate bounding boxes with less overlap (
Fig. 6. Visualization of predicted structure and detected bounding boxes on a complex table with many rows. The OTSL model (B) captured the repeating pattern of horizontally merged cells from the GT (A), unlike the HTML model (C). The HTML model also didn't complete the HTML sequence correctly and displayed a lot more drift and overlap of bounding boxes. "PMC5406406_003_01.png", PubTabNet.
<!-- image -->
## 6 Conclusion
We demonstrated that representing tables in HTML for the task of table structure recognition with Im2Seq models is ill-suited and has serious limitations. Furthermore, we presented in this paper an Optimized Table Structure Language (OTSL) which, when compared to commonly used general purpose languages, has several key benefits.
@ -211,6 +227,8 @@ Secondly, OTSL has more inherent structure and a significantly restricted vocabu
17. Xue, W., Li, Q., Tao, D.: Res2tim: Reconstruct syntactic structures from table images. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 749-755. IEEE (2019)
18. Xue, W., Yu, B., Wang, W., Tao, D., Li, Q.: Tgrnet: A table graph reconstruction network for table structure recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1295-1304 (2021)
19. Ye, J., Qi, X., He, Y., Chen, Y., Gu, D., Gao, P., Xiao, R.: Pingan-vcgroup's solution for icdar 2021 competition on scientific literature parsing task b: Table recognition to html (2021). https://doi.org/10.48550/ARXIV.2105.01848 , https://arxiv.org/abs/2105.01848
@ -5,8 +5,15 @@ Front cover
## Row and Column Access Control Support in IBM DB2 for i
Implement roles and separation of duties
<!-- image -->
Leverage row permissions on the database
Protect columns by defining column masks
ibm.com/redbooks
Jim Bainbridge Hernando Bedoya Rob Bestgen Mike Cain Dan Cruikshank Jim Denton Doug Mack Tom McKinley Kent Milligan
<!-- image -->
@ -16,17 +23,17 @@ Front cover
International Technical Support Organization
## Row and Column Access Control Support in IBM DB2 for i
November 2014
Note: Before using this information and the product it supports, read the information in "Notices" on page vii.
First Edition (November 2014)
This edition applies to Version 7, Release 2 of IBM i (product number 5770-SS1).
© Copyright International Business Machines Corporation 2014. All rights reserved.
Note to U.S. Government Users Restricted Rights -- Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
@ -39,7 +46,7 @@ Note to U.S. Government Users Restricted Rights -- Use, duplication or disclosur
| DB2 for i Center of Excellence | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ix |
| Preface | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . xi |
| Authors . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . xi | |
| Now you can become a published author, too! . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | xiii |
| Comments welcome. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | xiii |
| Stay connected to IBM Redbooks | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . xiv |
| Chapter 1. Securing and protecting IBM DB2 data . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | 1 |
@ -79,56 +86,56 @@ Note to U.S. Government Users Restricted Rights -- Use, duplication or disclosur
| 3.6.8 Demonstrating data access with a view and RCAC . . . . . . . . . . . . . . . . . . . . . . . | 32 |
| Chapter 4. Implementing Row and Column Access Control: Banking example | 37 |
|--------------------------------------------------------------------------|------|
| 4.1 Business requirements for the RCAC banking scenario | 38 |
| 4.2 Description of the users roles and responsibilities | 39 |
| 4.3 Implementation of RCAC | 42 |
| 4.3.1 Reviewing the tables that are used in this example | 42 |
| 4.3.2 Assigning function ID QIBM_DB_SECADM to the Database Engineers group | 47 |
| 4.3.3 Creating group profiles for the users and their roles | 50 |
| 4.3.4 Creating the CUSTOMER_LOGIN_ID global variable | 52 |
| 4.3.5 Defining and creating row permissions | 54 |
| 4.3.6 Defining and creating column masks | 58 |
| 4.3.7 Restricting the inserting and updating of masked data | 60 |
| 4.3.8 Activating row and column access control | |
| 4.3.9 Reviewing row permissions | 64 |
| 4.3.10 Demonstrating data access with RCAC | 66 |
| 4.3.11 Query implementation with RCAC activated | 75 |
| Chapter 5. RCAC and non-SQL interfaces | 79 |
| 5.1 Unsupported interfaces | 80 |
| 5.2 Native query result differences | |
| 5.3 Accidental updates with masked values | 81 |
| 5.4 System CL commands considerations | 82 |
| 5.4.1 Create Duplicate Object (CRTDUPOBJ) command | 82 |
| 5.4.2 Copy File (CPYF) command | 82 |
| 5.4.3 Copy Library (CPYLIB) command | 83 |
| Chapter 6. Additional considerations | |
| 6.1 Timing of column masking | |
| 6.2 RCAC effects on data movement | |
| 6.2.1 Effects when RCAC is defined on the source table | 88 |
| 6.2.3 Effects when RCAC is defined on both source and target tables | 90 |
| 6.3 RCAC effects on joins | 91 |
| 6.3.1 Inner joins | 92 |
| 6.3.2 Outer joins | 94 |
| 6.3.3 Exception joins | 96 |
| 6.4 Monitoring, analyzing, and debugging with RCAC | 97 |
| 6.4.1 Query monitoring and analysis tools | 97 |
| 6.4.2 Index advisor | 99 |
| 6.4.3 Metadata using catalogs | |
| 6.5 Views, materialized query tables, and query rewrite with RCAC | 102 |
| 6.5.1 Views | 102 |
| 6.5.2 Materialized query tables | 103 |
| 6.5.3 Query rewrite | 105 |
| 6.6 RCAC effects on performance and scalability | 105 |
| 6.7 Exclusive lock to implement RCAC (availability issues) | 107 |
| 6.8 Avoiding propagation of masked data | 108 |
| Chapter 7. Row and Column Access Control management | 113 |
| 7.1 Managing row permissions and column masks | 114 |
@ -184,11 +191,11 @@ IBM, the IBM logo, and ibm.com are trademarks or registered trademarks of Intern
The following terms are trademarks of the International Business Machines Corporation in the United States, other countries, or both:
| AS/400® | IBM® | Redpaper™ |
|----------|----------------|--------------------|
| DB2® | Power Systems™ | Redbooks (logo)® |
| DRDA® | Redbooks® | System i® |
The following terms are trademarks of other companies:
@ -200,7 +207,7 @@ DB2 for i Center of Excellence
Solution Brief IBM Systems Lab Services and Training
## Highlights
- Enhance the performance of your database operations
@ -263,7 +270,9 @@ r Configuration of systems, operating system and products to fully leverage data
## Key client benefits
Gain greater database and application performance within your current environment. Achieve greater productivity in the development and maintenance of database and applications using modern techniques. Architect and design data structures to accommodate and benefit from business analytics (BA) tools and processes.
## For more information
@ -276,11 +285,7 @@ ibm.com GLYPH<g18>GLYPH<g86>GLYPH<g92>GLYPH<g86>GLYPH<g87>GLYPH<g72>GLYPH<g80>GL
© Copyright IBM Corporation 2013
IBM Corporation Route 100 Somers, NY 10589
Produced in the United States of America March 2013
@ -295,6 +300,8 @@ Not all offerings are available in every country in which IBM operates.
Please Recycle
QLS12392-USEN-00
## Preface
This IBMfi Redpaper™ publication provides information about the IBM i 7.2 feature of IBM DB2fi for i Row and Column Access Control (RCAC). It offers a broad description of the function and advantages of controlling access to data in a comprehensive and transparent way. This publication helps you understand the capabilities of RCAC and provides examples of defining, creating, and implementing the row permissions and column masks in a relational database environment.
@ -352,9 +359,7 @@ Kent Milligan is a senior DB2 consultant on the DB2 for i Center of Excellence t
Thanks to the following people for their contributions to this project:
Debra Landon International Technical Support Organization, Rochester Center
Craig Aldrich, Mark Anderson, Theresa Euler, Scott Forstie, Chad Olstad IBM Rochester Development
@ -362,7 +367,9 @@ Craig Aldrich, Mark Anderson, Theresa Euler, Scott Forstie, Chad Olstad IBM Roch
Here's an opportunity to spotlight your skills, grow your career, and become a published author-all at the same time! Join an ITSO residency project and help write a book in your area of expertise, while honing your experience using leading-edge technologies. Your efforts will help to increase product acceptance and customer satisfaction, as you expand your network of technical contacts and relationships. Residencies run from two to six weeks in length, and you can participate either in person or as a remote resident working from your home base.
Find out more about the residency program, browse the residency index, and apply online at:
ibm.com/redbooks/residencies.html
## Comments welcome
@ -378,7 +385,9 @@ GLYPH<SM590000> Send your comments in an email to:
redbooks@us.ibm.com
- Mail your comments to:
IBM Corporation, International Technical Support Organization Dept. HYTD Mail Station P099 2455 South Road Poughkeepsie, NY 12601-5400
## Stay connected to IBM Redbooks
@ -407,6 +416,8 @@ http://www.redbooks.ibm.com/rss.html
## Chapter 1. Securing and protecting IBM DB2 data
Recent news headlines are filled with reports of data breaches and cyber-attacks impacting global businesses of all sizes. The Identity Theft Resource Center$^{1}$ reports that almost 5000 data breaches have occurred since 2005, exposing over 600 million records of data. The financial cost of these data breaches is skyrocketing. Studies from the Ponemon Institute$^{2}$ revealed that the average cost of a data breach increased in 2013 by 15% globally and resulted in a brand equity loss of $9.4 million per attack. The average cost that is incurred for each lost record containing sensitive information increased more than 9% to $145 per record.
@ -482,6 +493,8 @@ The new DB2 RCAC support provides a method for controlling data access across al
## Chapter 2. Roles and separation of duties
One of the primary objectives of row and column access control (RCAC) is to create data security policies that control and govern user access to data and limit the data access of DB2 designers and administrators to only the minimum that is required to do their jobs.
@ -587,13 +600,8 @@ Table 2-1 FUNCTION_USAGE view
To discover who has authorization to define and manage RCAC, you can use the query that is shown in Example 2-1.
Example 2-1 Query to determine who has authority to define and manage RCAC
SELECT function_id, user_name, usage, user_type FROM function_usage WHERE function_id='QIBM_DB_SECADM' ORDER BY user_name;
## 2.2 Separation of duties
@ -612,21 +620,20 @@ A preferred practice is that the RCAC administrator has the QIBM_DB_SECADM funct
Table 2-2 shows a comparison of the different function usage IDs and *JOBCTL authority to the different CL commands and DB2 for i tools.
Table 2-2 Comparison of the different function usage IDs and *JOBCTL authority
| User action | *JOBCTL | QIBM_DB_SECADM | QIBM_DB_SQLADM | QIBM_DB_SYSMON | No Authority |
|--------------------------------------------------------------------------------|-----------|------------------|------------------|------------------|----------------|
| SET CURRENT DEGREE (SQL statement) | X | | X | | |
| CHGQRYA command targeting a different user's job | X | | X | | |
| STRDBMON or ENDDBMON commands targeting a different user's job | X | | X | | |
| STRDBMON or ENDDBMON commands targeting a job that matches the current user | X | | X | X | X |
| QUSRJOBI() API format 900 or System i Navigator's SQL Details for Job | X | | X | X | |
| Visual Explain within Run SQL scripts | X | | X | X | X |
| Visual Explain outside of Run SQL scripts | X | | X | | |
| ANALYZE PLAN CACHE procedure | X | | X | | |
| DUMP PLAN CACHE procedure | X | | X | | |
| MODIFY PLAN CACHE procedure | X | | X | | |
| MODIFY PLAN CACHE PROPERTIES procedure (currently does not check authority) | X | | X | | |
| CHANGE PLAN CACHE SIZE procedure (currently does not check authority) | X | | X | | |
| User action | *JOBCTL | QIBM_DB_SECADM | QIBM_DB_SQLADM | QIBM_DB_SYSMON | No Authority |
@ -658,7 +665,7 @@ Table 2-2 Comparison of the different function usage IDs and *JOBCTL authority
## Chapter 3. Row and Column Access Control
@ -821,7 +828,11 @@ Here is an example of using the VERIFY_GROUP_FOR_USER function:
3. If a user is connected to the server using user profile JANE, all of the following function invocations return a value of 1:
VERIFY_GROUP_FOR_USER (CURRENT_USER, 'MGR') VERIFY_GROUP_FOR_USER (CURRENT_USER, 'JANE', 'MGR') VERIFY_GROUP_FOR_USER (CURRENT_USER, 'JANE', 'MGR', 'STEVE')
The following function invocation returns a value of 0:
VERIFY_GROUP_FOR_USER (CURRENT_USER, 'JUDY', 'TONY')
## 3.4 Establishing and controlling accessibility by using the RCAC rule text
@ -846,9 +857,9 @@ One of the first tasks in either the row permission or the column mask logic is
More sophisticated methods can employ existential, day of year / time of day, and relational comparisons with set operations. For example, you can use a date master or date dimension table to determine whether the current date is a normal business day. If the current date is a valid business day, then access is allowed. If the current date is not a business day (for example a weekend day or holiday), access is denied. This test can be accomplished by performing a lookup using a subquery, such as the one that is shown in Example 3-1.
Example 3-1 Subquery that is used as part of the rule
CURRENT_DATE IN (SELECT D.DATE_KEY FROM DATE_MASTER D WHERE D.BUSINESS_DAY = 'Y')
Given that joins and subqueries can be used to perform set-based operations against existing data that is housed in other objects, almost any relational test can be constructed. If the data in the objects is manipulated over time, the RCAC test logic (and user query results) can be changed without modifying the actual row permission or column mask. This includes moving a user from one group to another or changing a column value that is used to allow or disallow access. For example, if Saturday is now a valid business day, only the BUSINESS_DAY value in the DATE_MASTER must be updated, not the permission logic. This technique can potentially avoid downtime because of the exclusive lock that is needed on the table when adding or changing RCAC definitions.
@ -901,6 +912,7 @@ SELECT function_id, user_name, usage, user_type FROM qsys2.function_usage WHERE
3. The result of the SQL statement is shown in Figure 3-6. In this example, either MCAIN or HBEDOYA can implement RCAC in the Human Resources database.
Figure 3-6 Result of the function ID query
<!-- image -->
## 3.6.2 Creating group profiles for the users and their roles
@ -916,7 +928,7 @@ Assuming that all the employees have a valid user profile, the next step is to c
These are created by creating user profiles with no password. Example 3-4 shows the Create User Profile ( CRTUSRPRF ) CL commands that you use to create these group profiles.
Example 3-4 Creating group profiles
CRTUSRPRF USRPRF(EMP) PASSWORD() TEXT('Employees Group')
CRTUSRPRF USRPRF(MGR) PASSWORD() TEXT('Managers Group')
CRTUSRPRF USRPRF(HR) PASSWORD() TEXT('Human Resources Group')
@ -948,6 +960,7 @@ SELECT EMPLOYEE_ID, LAST_NAME, JOB_DESCRIPTION, DATE_OF_BIRTH, TAX_ID, USER_ID,
The result of this query is shown in Figure 3-8.
Figure 3-8 List of employees without RCAC enabled
<!-- image -->
## 3.6.4 Defining and creating row permissions
@ -989,10 +1002,8 @@ Define the different masks for the columns that are sensitive by completing the
To implement this column mask, run the SQL statement that is shown in Example 3-8.
Example 3-8 Creation of a mask on the DATE_OF_BIRTH column
CREATE MASK HR_SCHEMA.MASK_DATE_OF_BIRTH_ON_EMPLOYEES ON HR_SCHEMA.EMPLOYEES AS EMPLOYEES FOR COLUMN DATE_OF_BIRTH
RETURN CASE WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'HR', 'EMP' ) = 1 THEN EMPLOYEES . DATE_OF_BIRTH WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'MGR' ) = 1 AND SESSION_USER = EMPLOYEES . USER_ID THEN EMPLOYEES . DATE_OF_BIRTH WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'MGR' ) = 1 AND SESSION_USER <> EMPLOYEES . USER_ID THEN ( 9999 || '-' || MONTH ( EMPLOYEES . DATE_OF_BIRTH ) || '-' || DAY (EMPLOYEES.DATE_OF_BIRTH )) ELSE NULL END ENABLE ;
@ -1008,7 +1019,7 @@ RETURN CASE WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'HR', 'EMP' ) = 1 THEN E
To implement this column mask, run the SQL statement that is shown in Example 3-9.
Example 3-9 Creating a mask on the TAX_ID column
CREATE MASK HR_SCHEMA.MASK_TAX_ID_ON_EMPLOYEES ON HR_SCHEMA.EMPLOYEES AS EMPLOYEES FOR COLUMN TAX_ID RETURN CASE WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'HR' ) = 1 THEN EMPLOYEES . TAX_ID WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'MGR' ) = 1 AND SESSION_USER = EMPLOYEES . USER_ID THEN EMPLOYEES . TAX_ID WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'MGR' ) = 1 AND SESSION_USER <> EMPLOYEES . USER_ID THEN ( 'XXX-XX-' CONCAT QSYS2 . SUBSTR ( EMPLOYEES . TAX_ID , 8 , 4 ) ) WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'EMP' ) = 1 THEN EMPLOYEES . TAX_ID ELSE 'XXX-XX-XXXX' END ENABLE ;
@ -1049,8 +1060,6 @@ SELECT COUNT(*) as ROW_COUNT FROM HR_SCHEMA.EMPLOYEES;
2. The result of the query for a user that belongs to the HR group profile is shown in Figure 3-13. This user can see all the 42 rows (employees).
Figure 3-13 Count of EMPLOYEES by HR
<!-- image -->
@ -1061,8 +1070,6 @@ Figure 3-14 Count of EMPLOYEES by a manager
4. The result of the same query that is run by an employee (DSSMITH) gives the result that is shown in Figure 3-15. Each employee can see only his or her own data (row).
Figure 3-15 Count of EMPLOYEES by an employee
<!-- image -->
@ -1082,18 +1089,22 @@ SELECT EMPLOYEE_ID, LAST_NAME, JOB_DESCRIPTION, DATE_OF_BIRTH, TAX_ID, USER_ID,
7. Figure 3-17 shows the results of the query for a Human Resources (VGLUCCHESS) user profile. The user can see all the rows and all the columns.
Figure 3-17 SQL statement result by Human Resources user profile
<!-- image -->
8. Figure 3-18 shows the results of the same query for the Manager (TQSPENSER). Notice the masking of the DATE_OF_BIRTH and TAX_ID columns.
Figure 3-18 SQL statement result by Manager profile
<!-- image -->
9. Figure 3-19 shows the results of the same query for an employee (DSSMITH). The employee can see only his own data with no masking at all.
Figure 3-19 SQL statement result by an employee profile
<!-- image -->
10.Figure 3-20 shows the results of the same query for the Consultant/DBE, who is not one of the company's employees.
Figure 3-20 SQL statement result by Consultant/DBE profile
<!-- image -->
## 3.6.8 Demonstrating data access with a view and RCAC
@ -1102,16 +1113,16 @@ This section covers data access with a view and RCAC. Complete the following ste
1. The EMPLOYEES table has a column that is called On_Leave_Flag (Figure 3-21 on page 33) indicating that the employee is on Leave of Absence. For this purpose, a view is created that lists only the employees that are on leave.
Figure 3-21 Employees on leave
<!-- image -->
2. Example 3-13 shows the definition of the view.
Example 3-13 View of employees on leave
CREATE VIEW HR_SCHEMA.EMPLOYEES_ON_LEAVE (EMPLOYEE_ID, FIRST_NAME, MIDDLE_INITIAL, LAST_NAME, WORK_DEPARTMENT, PHONE_EXTENSION, JOB_DESCRIPTION, DATE_OF_BIRTH,
TAX_ID, USER_ID, MANAGER_OF_EMPLOYEE, ON_LEAVE_FLAG )
AS SELECT EMPLOYEE_ID, FIRST_NAME , MIDDLE_INITIAL, LAST_NAME , WORK_DEPARTMENT, PHONE_EXTENSION, JOB_DESCRIPTION, DATE_OF_BIRTH, TAX_ID, USER_ID, MANAGER_OF_EMPLOYEE, ON_LEAVE_FLAG FROM HR_SCHEMA.EMPLOYEES WHERE ON_LEAVE_FLAG = 'Y';
<!-- image -->
3. Use the view to query the data and see who is on leave. The SQL statement that is used is shown in Example 3-14:
@ -1122,23 +1133,24 @@ SELECT EMPLOYEE_ID, LAST_NAME, JOB_DESCRIPTION, DATE_OF_BIRTH, TAX_ID, USER_ID,
4. Start with the Human Resources person (VGLUCCHESS) and see the result of the previous query. He sees the two employees that are on leave, and no masking is done on the DATE_OF_BIRTH and TAX_ID columns. The results of the query are shown in Figure 3-22.
Figure 3-22 Employees on leave - Human Resources user
<!-- image -->
5. Figure 3-23 shows what the Manager (TQSPENSER) gets when he runs the same query over the view. He sees only the employees that are on leave that are managed by him. In this example, it is one employee. The columns are masked, which confirms that RCAC is applied to the view as well.
Figure 3-23 Employee on leave - Manager of Field Reps user
<!-- image -->
6. Figure 3-24 shows what the employee (DSSMITH) gets when he runs the same query over the view. The employee gets an empty set or he gets only himself if he is on leave.
Figure 3-24 Employees on leave - employee user
<!-- image -->
## Chapter 4. Implementing Row and Column Access Control: Banking example
@ -1202,26 +1214,12 @@ GLYPH<SM590000> PUBLIC: Anyone not already in a group
Based on their respective roles and responsibilities, the users (that is, a group) are controlled by row permissions and column masks. The chart that is shown in Figure 4-2 shows the rules for row and column access in this example.
Figure 4-2 Rules for row and column access
| | CUSTOMERS | CUSTOMERS | ACCOUNTS | ACCOUNTS | TRANSACTIONS | TRANSACTIONS |
|----------|-------------|-------------|------------|------------|----------------|----------------|
| SECURITY | No Rows | Yes | No Rows | Yes | No Rows | No |
| DBE | All Rows | Yes | All Rows | Yes | All Rows | No |
| ADMIN | All Rows | No | All Rows | No | All Rows | No |
| TELLER | All Rows | Yes | All Rows | No | All Rows | No |
| CUSTOMER | Own Rows | No | Own Rows | No | Own Rows | No |
| PUBLIC | No Rows | Yes | No Rows | Yes | No Rows | No |
<!-- image -->
The chart that is shown in Figure 4-3 shows the column access that is allowed by group and lists the column masks by table.
Figure 4-3 Column masks
| | | CUSTOMERS | ACCOUNTS |
|----------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------|
| SECURITY | No Rows | CUSTOMER_DRIVERS_LICENSE_NUMBER CUSTOMER_EMAIL CUSTOMER_LOGIN_ID CUSTOMER_SECURITY_QUESTION CUSTOMER_SECURITY_QUESTION_ANSWER CUSTOMER_TAX_ID | ACCOUNT_NUMBER |
| DBE | All Rows | CUSTOMER_DRIVERS_LICENSE_NUMBER CUSTOMER_EMAIL CUSTOMER_LOGIN_ID CUSTOMER_SECURITY_QUESTION CUSTOMER_SECURITY_QUESTION_ANSWER CUSTOMER_TAX_ID | ACCOUNT_NUMBER |
| ADMIN | All Rows | None | None |
| TELLER | All Rows | CUSTOMER_EMAIL CUSTOMER_LOGIN_ID CUSTOMER_SECURITY_QUESTION CUSTOMER_SECURITY_QUESTION_ANSWER CUSTOMER_TAX_ID | None |
| CUSTOMER | Own Rows | None | None |
| PUBLIC | No Rows | CUSTOMER_DRIVERS_LICENSE_NUMBER CUSTOMER_EMAIL CUSTOMER_LOGIN_ID CUSTOMER_SECURITY_QUESTION CUSTOMER_SECURITY_QUESTION_ANSWER CUSTOMER_TAX_ID | ACCOUNT_NUMBER |
<!-- image -->
For the demonstration and testing of RCAC in this example, the following users interact with the database. Furthermore, the column masking rules are developed independently of the row permissions. If a person does not have permission to access the row, the column mask processing does not occur.
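The evaluation order matters: the row permission is applied first, and the column masks run only for rows the user is entitled to see. Here is a small illustrative sketch of that composition in plain Python (not DB2 itself), with the group names and masked CUSTOMERS columns taken from Figures 4-2 and 4-3 and '*****' as a stand-in mask value:

```python
# Row rules per group (Figure 4-2) for the CUSTOMERS table.
ROW_RULE = {"SECURITY": "none", "DBE": "all", "ADMIN": "all",
            "TELLER": "all", "CUSTOMER": "own", "PUBLIC": "none"}

# Masked CUSTOMERS columns per group (Figure 4-3).
FULL = {"CUSTOMER_DRIVERS_LICENSE_NUMBER", "CUSTOMER_EMAIL",
        "CUSTOMER_LOGIN_ID", "CUSTOMER_SECURITY_QUESTION",
        "CUSTOMER_SECURITY_QUESTION_ANSWER", "CUSTOMER_TAX_ID"}
MASKED = {"SECURITY": FULL, "DBE": FULL, "PUBLIC": FULL,
          "TELLER": FULL - {"CUSTOMER_DRIVERS_LICENSE_NUMBER"},
          "ADMIN": set(), "CUSTOMER": set()}

def fetch(group: str, row: dict, login_id: str):
    rule = ROW_RULE[group]
    allowed = rule == "all" or (rule == "own"
                                and row["CUSTOMER_LOGIN_ID"] == login_id)
    if not allowed:
        return None  # row filtered out; column masks are never evaluated
    return {col: "*****" if col in MASKED[group] else val
            for col, val in row.items()}
```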
@ -1275,6 +1273,7 @@ GLYPH<SM590000> Query implementation with RCAC activated
This section reviews the tables that are used in this example. As shown in Figure 4-5, there are three main tables that are involved in the data model: CUSTOMERS, ACCOUNTS, and TRANSACTIONS. There are 90 customers.
Figure 4-5 Tables that are used in the banking example
<!-- image -->
Note: Appendix A, "Database definitions for the RCAC banking example" on page 121 provides a script that you can use to create all the database definitions or DDLs to re-create this RCAC example.
@ -1290,6 +1289,7 @@ Figure 4-6 CUSTOMERS table attributes
3. Click the Columns tab to see the columns of the CUSTOMERS table, as shown in Figure 4-7.
Figure 4-7 Column definitions of the CUSTOMERS table
<!-- image -->
4. Click the Key Constraints , Foreign Key Constraints , and Check Constraints tabs to review the key, foreign, and check constraints on the CUSTOMERS table, as shown in Figure 4-8. There are no Foreign Key Constraints or Check Constraints on the CUSTOMERS table.
@ -1304,10 +1304,12 @@ Figure 4-9 ACCOUNTS table attributes
6. Click the Columns tab to see the columns of the ACCOUNTS table, as shown in Figure 4-10.
Figure 4-10 Column definitions of the ACCOUNTS table
<!-- image -->
7. Click the Key Constraints , Foreign Key Constraints , and Check Constraints tabs to review the key, foreign, and check constraints on the ACCOUNTS table, as shown in Figure 4-11. There is one Foreign Key Constraint and no Check Constraints on the ACCOUNTS table.
Figure 4-11 Reviewing the constraints on the ACCOUNTS table
<!-- image -->
8. Review the definition of the TRANSACTIONS table. The definition of the TRANSACTIONS table is shown in Figure 4-12. RCAC is not defined for this table yet.
@ -1317,10 +1319,12 @@ Figure 4-12 TRANSACTIONS table attributes
9. Click the Columns tab to see the columns of the TRANSACTIONS table, as shown in Figure 4-13.
Figure 4-13 Column definitions of the TRANSACTIONS table
<!-- image -->
10.Click the Key Constraints , Foreign Key Constraints , and Check Constraints tabs to review the key, foreign, and check constraints on the TRANSACTIONS table, as shown in Figure 4-14. There is one Foreign Key Constraint and one Check Constraint on the TRANSACTIONS table.
Figure 4-14 Reviewing the constraints on the TRANSACTIONS table
<!-- image -->
Now that you have reviewed the database model for this example, the following sections describe the steps that are required to implement RCAC in this banking scenario.
@ -1358,6 +1362,7 @@ Figure 4-19 Function usage ID Database Security Administrator customized
6. Run an SQL query that shows which user profiles are enabled to define RCAC. The SQL query is shown in Figure 4-20.
Figure 4-20 Query to display user profiles with function usage ID for RCAC
<!-- image -->
## 4.3.3 Creating group profiles for the users and their roles
@ -1451,7 +1456,9 @@ Figure 4-30 New row permissions on the ACCOUNTS table
Note: You must join back to ACCOUNTS and then to CUSTOMERS by using a subquery to check whether the global variable matches CUSTOMER_LOGIN_ID. Also, if the row permission or column mask rule text references another table with RCAC defined, the RCAC for the referenced table is ignored.
- Any other user profile cannot see any rows at all.
Select the Enabled option. Click OK.
Figure 4-31 New row permissions on the TRANSACTIONS table
<!-- image -->
@ -1506,7 +1513,7 @@ Figure 4-35 List of column masks on BANK_SCHEMA
This step defines the check constraints that support the column masks to make sure that on INSERTS or UPDATES, data is not written with a masked value. For more information about the propagation of masked data, see 6.8, "Avoiding propagation of masked data" on page 108.
Complete the following steps:
1. Create a check constraint on the column CUSTOMER_EMAIL in the CUSTOMERS table. From the navigation pane of System i Navigator, right-click the CUSTOMERS table and select Definition , as shown in Figure 4-36.
@ -1604,14 +1611,17 @@ To test a DBE (MCAIN) user, complete the following steps:
1. Confirm that the user is the user of the session by running the first SQL statement, as shown in Figure 4-47. In this example, MCAIN is the DBE user.
Figure 4-47 DBE session user
<!-- image -->
2. The number of rows that the DBE user MCAIN can see is shown in Figure 4-48.
Figure 4-48 Number of rows that DBE user can see in the CUSTOMERS table
<!-- image -->
3. The result of the third SQL statement is shown in Figure 4-49. Note the masked columns. User MCAIN can see all the rows in the CUSTOMERS table, but there are some columns where the result is masked.
Figure 4-49 SQL statement that is run by the DBE user with masked columns
<!-- image -->
## Data access for SECURITY user with RCAC
@ -1625,6 +1635,7 @@ Figure 4-50 SECURITY session user
2. The number of rows in the CUSTOMERS table that the security officer can see is shown in Figure 4-51. The security officer cannot see any data at all.
Figure 4-51 Number of rows that the security officer can see in the CUSTOMERS table
<!-- image -->
3. The result of the third SQL statement is shown in Figure 4-52. Note the empty set that is returned to the security officer.
@ -1638,6 +1649,7 @@ To test a Teller (TQSPENCER) user, complete the following steps:
1. Confirm that the TELLER user is the user of the session by running the first SQL statement, as shown in Figure 4-53. In this example, TQSPENCER is a TELLER user.
Figure 4-53 TELLER session user
<!-- image -->
2. The number of rows in the CUSTOMERS table that the TELLER user can see is shown in Figure 4-54. The TELLER user can see all the rows.
@ -1647,6 +1659,7 @@ Figure 4-54 Number of rows that the TELLER user can see in the CUSTOMERS table
3. The result of the third SQL statement is shown in Figure 4-55. Note the masked columns. The TELLER user, TQSPENCER, can see all the rows, but there are some columns where the result is masked.
Figure 4-55 SQL statement that is run by the TELLER user with masked columns
<!-- image -->
## Data access for ADMIN user with RCAC
@ -1660,10 +1673,12 @@ Figure 4-56 ADMIN session user
2. The number of rows that the ADMIN user can see is shown in Figure 4-57. The ADMIN user can see all the rows.
Figure 4-57 Number of rows that the ADMIN can see in the CUSTOMERS table
<!-- image -->
3. The result of the third SQL statement is shown in Figure 4-58. There are no masked columns.
Figure 4-58 SQL statement that is run by the ADMIN user - no masked columns
<!-- image -->
## Data access for WEBUSER user with RCAC
@ -1692,6 +1707,7 @@ Figure 4-62 Number of rows that the WEBUSER can see in the CUSTOMERS table
5. The result of the third SQL statement is shown in Figure 4-63. There are no masked columns, and the user can see only one row, which is the user's own row.
Figure 4-63 SQL statement that is run by WEBUSER - no masked columns
<!-- image -->
## Other examples of data access with RCAC
@ -1700,20 +1716,23 @@ To run an SQL statement that lists all the accounts and current balance by custo
1. Run the SQL statement that is shown in Figure 4-64 using the WEBUSER user profile. The SQL statement has no WHERE clause, but the WEBUSER can see only his accounts.
Figure 4-64 List of accounts and current balance by customer using the WEBUSER user profile
<!-- image -->
2. Figure 4-65 shows running a more complex SQL statement that calculates transaction total by account for year and quarter. Run this statement using the WEBUSER profile. The SQL statement has no WHERE clause, but the WEBUSER user can see only his transactions.
Figure 4-65 Calculate transaction total by account for year and quarter using the WEBUSER profile
<!-- image -->
3. Run the same SQL statement that lists the accounts and current balance by customer, but use a TELLER user profile. The result of this SQL statement is shown in Figure 4-66. The TELLER user can see all the rows in the CUSTOMERS table.
Figure 4-66 List of accounts and current balance by customer using a TELLER user profile
<!-- image -->
## 4.3.11 Query implementation with RCAC activated
This section looks at some other interesting information that is related to RCAC by comparing the access plans of the same SQL statement without RCAC and with RCAC. This example uses Visual Explain and runs an SQL statement that lists the accounts and current balance by customer.
Complete the following steps:
1. Figure 4-67 shows the SQL statement in Visual Explain ran with no RCAC. The implementation of the SQL statement is a two-way join, which is exactly what the SQL statement is doing.
@ -1740,7 +1759,7 @@ Figure 4-70 Index advice with RCAC enabled
## Chapter 5. RCAC and non-SQL interfaces
@ -1900,7 +1919,7 @@ GLYPH<SM590000> Triggers and functions (SECURED)
- RCAC is only one part of the solution
## Chapter 6. Additional considerations
## 6.1 Timing of column masking
@ -1908,47 +1927,8 @@ An important design and implementation consideration is the fact that RCAC colum
An example of this situation is shown in Figure 6-1. However, note that aggregate functions (a form of grouping) are based on masked values.
SELECT CREDIT_CARD_NUMBER, SUM(AMOUNT) AS TOTAL FROM TRANSACTIONS GROUP BY CREDIT_CARD_NUMBER ORDER BY CREDIT_CARD_NUMBER;

## Without RCAC Masking

| CREDIT_CARD_NUMBER | TOTAL |
|----------------------|----------|
| 3785 0000 0000 1234 | 233.50 |
| 3785 1111 1111 1234 | 105.10 |
| 3785 2222 2222 1234 | 300.00 |
| 3785 3333 3333 1234 | 1,775.00 |
| 5466 4444 4444 1234 | 601.70 |
| 5466 5555 5555 1234 | 37.80 |
| 5466 6666 6666 1234 | 490.45 |
| 6011 7777 7777 1234 | 1005.00 |
| 6011 8888 8888 1234 | 750.33 |
| 6011 9999 9999 0001 | 10.00 |

## With RCAC Masking

| CREDIT_CARD_NUMBER | TOTAL |
|----------------------|----------|
| **** **** **** 1234 | 233.50 |
| **** **** **** 1234 | 105.10 |
| **** **** **** 1234 | 300.00 |
| **** **** **** 1234 | 1,775.00 |
| **** **** **** 1234 | 601.70 |
| **** **** **** 1234 | 37.80 |
| **** **** **** 1234 | 490.45 |
| **** **** **** 1234 | 1005.00 |
| **** **** **** 1234 | 750.33 |
| **** **** **** 0001 | 10.00 |

Figure 6-1 Timing of column masking

<!-- image -->
Conversely, field procedure masking causes the column values to be changed (that is, masked) and stored in the row. When the table is queried and the masked columns are referenced, the masked data is used for any local selection, joining, grouping, or ordering operations. This situation can have a profound effect on the query's final result set and not just on the column values that are returned. Field procedure masking occurs when the column values are read from disk before any query processing. RCAC masking occurs when the column values are returned to the application after query processing. This difference in behavior is shown in Figure 6-2.
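To make the difference concrete, here is a small illustrative simulation in plain Python (not DB2): with RCAC, grouping runs on the stored card numbers and only the returned column is masked, so two cards that share the same last four digits still produce two result rows; with a field procedure, grouping runs on the already-masked stored values and those rows collapse into one:

```python
from collections import defaultdict

rows = [("3785 0000 0000 1234", 233.50),
        ("3785 1111 1111 1234", 105.10)]   # two cards, same last four digits

def mask(ccn: str) -> str:
    return "**** **** **** " + ccn[-4:]

# RCAC-style: SUM/GROUP BY on the real values, mask applied to the output.
totals = defaultdict(float)
for ccn, amt in rows:
    totals[ccn] += amt
rcac = [(mask(ccn), t) for ccn, t in totals.items()]   # two result rows

# Field-procedure-style: the masked value is what is stored, so the
# grouping key is already masked and the two cards collapse.
fp = defaultdict(float)
for ccn, amt in rows:
    fp[mask(ccn)] += amt                               # one combined row
print(rcac, dict(fp))
```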
@ -2146,9 +2126,10 @@ http://www.ibm.com/partnerworld/wps/servlet/ContentHandler/stg_ast_sys_wp_db2_i_
To make the discovery and identification of RCAC row permissions and column masks programmatically, query the QSYS2.SYSCONTROLS catalog view or the QSYS2.SYSCONTROLSDEP catalog view directly. Otherwise, the System i Navigator Database graphical interface can be used interactively.
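For example, a monitoring script can read the catalog through any SQL interface. The sketch below uses the Python ibm_db driver with a placeholder connection string (host, user, and password are assumptions to be replaced with real values):

```python
import ibm_db

# Placeholder connection string; substitute a real host and credentials.
conn = ibm_db.connect(
    "DATABASE=*LOCAL;HOSTNAME=myibmi;UID=user;PWD=secret", "", "")

# List every row permission and column mask defined on the system.
stmt = ibm_db.exec_immediate(conn, "SELECT * FROM QSYS2.SYSCONTROLS")
row = ibm_db.fetch_assoc(stmt)
while row:
    print(row)
    row = ibm_db.fetch_assoc(stmt)
```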
Figure 6-20 shows the QSYS2.SYSCONTROLS catalog view.
Figure 6-20 RCAC and catalogs
<!-- image -->
The SYSCONTROLS catalog view contains the following columns:
@ -2248,11 +2229,7 @@ The following example illustrates this scenario:
1. Create schema and tables:
CREATE SCHEMA Schema1;
CREATE TABLE Schema1.employee(userID varchar(128), LocationID integer, Regionid integer);
CREATE TABLE Schema1.Sales (INVOICE INTEGER NOT NULL, SALEAMT DECIMAL(5,2), TAXAMT DECIMAL(5,2), LOCATIONID INTEGER, REGIONID INTEGER);
2. Create a row permission that allows the employees to see only rows from the region they work in:
@ -2260,9 +2237,7 @@ CREATE TABLE Schema1.Sales (INVOICE INTEGER NOT NULL, SALEAMT DECIMAL(5,2), TAXA
3. Create an MQT to summarize sales by location:
-- Create MQT to summarize sales by location
-- This has all of the data. The schema1.sales_perm1 predicate was not applied
CREATE TABLE Schema1.Location_Sales_MQT AS (SELECT LocationID, SUM(Saleamt) AS Total_Location_Sales FROM SCHEMA1.SALES GROUP BY LOCATIONID) DATA INITIALLY DEFERRED REFRESH DEFERRED MAINTAINED BY USER;
4. Populate the MQT (permission is not applied):
@ -2366,7 +2341,7 @@ As part of RCAC, new SQL syntax is provided to allow an action to be performed w
In Example 6-4, the mask is defined to return a value of 'XXX-XX-nnnn' for any query that is not done by a user profile in the DBMGR group. The constraint checks that the column SSN does not have the masked value.
Example 6-4 Check constraint to avoid masked data
CREATE SCHEMA MY_LIB SET SCHEMA MY_LIB CREATE TABLE MY_LIB.EMP_INFO (COL1_name CHAR(10) WITH DEFAULT 'DEFAULT', COL2_ssn CHAR(11) WITH DEFAULT 'DEFAULT') CREATE MASK MASK_ssn ON MY_LIB.EMP_INFO FOR COLUMN COL2_ssn RETURN CASE WHEN VERIFY_GROUP_FOR_USER ( SESSION_USER , 'DBMGR' ) = 1 THEN COL2_ssn
@ -2376,7 +2351,7 @@ ELSE 'XXX-XX-'||SUBSTR(COL2_ssn,8,4) END ENABLE | /* Check constraint for the up
The actions that are described in Example 6-4 on page 108 for ON UPDATE VIOLATION and ON INSERT VIOLATION also can be handled by a before trigger, as shown in Example 6-5.
Example 6-5 Before trigger to avoid masked data
CREATE TRIGGER PREVENT_MASK_SSN BEFORE INSERT OR UPDATE ON MY_LIB.EMP_INFO REFERENCING NEW ROW AS N OLD ROW AS O FOR EACH ROW MODE DB2ROW SECURED WHEN(SUBSTR(N.COL2_ssn,1,7) = 'XXX-XX-') BEGIN IF INSERTING THEN SET N.COL2_ssn = DEFAULT; ELSEIF UPDATING THEN SET N.COL2_ssn = O.COL2_ssn; END IF; END
@ -2390,7 +2365,7 @@ Triggers have access to the data in rows outside of the row permission or column
Any triggers that are defined on a table must be created with an attribute that designates that it is SECURED when RCAC definitions are created or altered for that table, as shown in Example 6-6. The same applies to a view that has an instead of trigger. That trigger must be secure at the point RCAC is enabled for any of the underlying tables the view is over.
Example 6-6 Trigger SECURED
/* Trigger created with the SECURED attribute */ CREATE TRIGGER PREVENT_MASK_SSN BEFORE INSERT OR UPDATE ON MY_LIB.EMP_INFO REFERENCING NEW ROW AS N OLD ROW AS O FOR EACH ROW MODE DB2ROW SECURED WHEN(SUBSTR(N.COL2_ssn,1,7) = 'XXX-XX-') BEGIN IF INSERTING THEN SET N.COL2_ssn = DEFAULT; ELSEIF UPDATING THEN SET N.COL2_ssn = O.COL2_ssn; END IF; END
@ -2398,7 +2373,7 @@ Any triggers that are defined on a table must be created with an attribute that
Within a CREATE PERMISSION or CREATE MASK , a function can be called. Because that UDF has access to the data before the RCAC rules are applied, the SECURE attribute is required on that function, as shown in Example 6-7.
Example 6-7 Specifying SECURED on a function
CREATE PERMISSION SCHEMA.PERM1 ON SCHEMA.TABLE1 FOR ROWS WHERE MY_UDF(CURRENT_USER,COLUMN1) = 1 ENFORCED FOR ALL ACCESS ENABLE; CREATE FUNCTION MY_UDF (INP1 CHAR(32), INP2 INTEGER) Returns INTEGER LANGUAGE SQL CONTAINS SQL SECURED
@ -2406,11 +2381,17 @@ The SECURED attribute of MY_UDF signifies that the function is considered secure
Consider the following examples:
- Table1 has RCAC defined and enabled. SELECT MY_UDF2(Column2) from schema.table1.
MY_UDF2 must be created with the SECURED attribute. If MY_UDF2 invokes MY_UDF3, there is no checking to ensure that it is also created with SECURED.
NOT SECURED is the default on the create function unless SECURED is explicitly selected.
This same rule applies for any function that might be invoked with a masked column specified as an argument.
GLYPH<SM590000> Table2 column SSN has a column mask that is defined on it. SELECT MY_UDF4(SSN) from table2. Because SSN has a column mask that is defined, MY_UDF4 must be created with the SECURED attribute.
GLYPH<SM590000> Table2 column SSN has a column mask that is defined on it.
SELECT MY_UDF4(SSN) from table2. Because SSN has a column mask that is defined, MY_UDF4 must be created with the SECURED attribute.
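A minimal sketch of the nested-UDF rule above (the function bodies and the INTEGER column type are assumptions; only the directly invoked function is verified):

-- MY_UDF3 defaults to NOT SECURED, and nothing checks it even though it
-- runs inside a secured function over an RCAC-enabled table.
CREATE FUNCTION MY_UDF3 (INP INTEGER) RETURNS INTEGER
LANGUAGE SQL CONTAINS SQL NOT SECURED
RETURN INP + 1;

-- MY_UDF2 must be SECURED because it is referenced in a query over Table1,
-- which has RCAC enabled.
CREATE FUNCTION MY_UDF2 (INP INTEGER) RETURNS INTEGER
LANGUAGE SQL CONTAINS SQL SECURED
RETURN MY_UDF3(INP);

SELECT MY_UDF2(Column2) FROM schema.table1;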
## 6.10 RCAC is only one part of the solution
@ -2434,7 +2415,7 @@ The ability to monitor, analyze, debug, and tune data-centric applications effec
Chapter 7.
7
## 7
## Row and Column Access Control management
@ -2534,7 +2515,7 @@ GLYPH<SM590000> Implementing RCAC with good design and proper planning
GLYPH<SM590000> DB2 for i Center of Excellence
8
## 8
## 8.1 Implementing RCAC with good design and proper planning
@ -2573,18 +2554,33 @@ If you are interested in engaging with the DB2 for i Center of Excellence, conta
Appendix A.
<!-- image -->
## Database definitions for the RCAC banking example
This appendix provides the database definitions or DDLs to re-create the Row and Column Access Control (RCAC) scenario that is described in Chapter 4, "Implementing Row and Column Access Control: Banking example" on page 37. The script that is shown in Example A-1 is the DDL script that is used to implement this example.
## Example A-1 DDL script to implement the RCAC banking example
Example A-1 DDL script to implement the RCAC banking example
/* Database Definitions for RCAC Bank Scenario */ /* Schema */ CREATE SCHEMA BANK_SCHEMA FOR SCHEMA BANKSCHEMA ; /* Global Variable */ CREATE VARIABLE BANK_SCHEMA.CUSTOMER_LOGIN_ID VARCHAR( 30) ; LABEL ON VARIABLE BANK_SCHEMA.CUSTOMER_LOGIN_ID IS 'Customer''s log in value passed by web application' ; /* Tables */ CREATE TABLE BANK_SCHEMA.CUSTOMERS ( CUSTOMER_ID FOR COLUMN CUSTO00001 INTEGER GENERATED ALWAYS AS IDENTITY ( START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE NO CYCLE NO ORDER CACHE 20 ), CUSTOMER_NAME FOR COLUMN CUSTO00002 VARCHAR(30) CCSID 37 NOT NULL , CUSTOMER_ADDRESS FOR COLUMN CUSTO00003 VARCHAR(30) CCSID 37 NOT NULL , CUSTOMER_CITY FOR COLUMN CUSTO00004 VARCHAR(30) CCSID 37 NOT NULL , CUSTOMER_STATE FOR COLUMN CUSTO00005 CHAR(2) CCSID 37 NOT NULL , CUSTOMER_PHONE FOR COLUMN CUSTO00006 CHAR(10) CCSID 37 NOT NULL , CUSTOMER_EMAIL FOR COLUMN CUSTO00007 VARCHAR(30) CCSID 37 NOT NULL , CUSTOMER_TAX_ID FOR COLUMN CUSTO00008 CHAR(11) CCSID 37 NOT NULL , CUSTOMER_DRIVERS_LICENSE_NUMBER FOR COLUMN CUSTO00012 CHAR(13) CCSID 37 DEFAULT NULL , CUSTOMER_LOGIN_ID FOR COLUMN CUSTO00009 VARCHAR(30) CCSID 37 DEFAULT NULL , CUSTOMER_SECURITY_QUESTION FOR COLUMN CUSTO00010 VARCHAR(100) CCSID 37 DEFAULT NULL ,
CUSTOMER_SECURITY_QUESTION_ANSWER FOR COLUMN CUSTO00011 VARCHAR(100) CCSID 37 DEFAULT NULL , INSERT_TIMESTAMP FOR COLUMN INSER00001 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP IMPLICITLY HIDDEN , UPDATE_TIMESTAMP FOR COLUMN UPDAT00001 TIMESTAMP GENERATED ALWAYS FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP NOT NULL IMPLICITLY HIDDEN , CONSTRAINT BANK_SCHEMA.CUSTOMER_ID_PK PRIMARY KEY( CUSTOMER_ID ) ) ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_LOGIN_ID_UK UNIQUE( CUSTOMER_LOGIN_ID ) ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_DRIVERS_LICENSE_CHECK CHECK( CUSTOMER_DRIVERS_LICENSE_NUMBER <> '*************' ) ON UPDATE VIOLATION PRESERVE CUSTOMER_DRIVERS_LICENSE_NUMBER ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_EMAIL_CHECK CHECK( CUSTOMER_EMAIL <> '****@****' ) ON UPDATE VIOLATION PRESERVE CUSTOMER_EMAIL ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_LOGIN_ID_CHECK CHECK( CUSTOMER_LOGIN_ID <> '*****' ) ON INSERT VIOLATION SET CUSTOMER_LOGIN_ID = DEFAULT ON UPDATE VIOLATION PRESERVE CUSTOMER_LOGIN_ID ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_SECURITY_QUESTION_CHECK CHECK( CUSTOMER_SECURITY_QUESTION_ANSWER <> '*****' ) ON INSERT VIOLATION SET CUSTOMER_SECURITY_QUESTION_ANSWER = DEFAULT ON UPDATE VIOLATION PRESERVE CUSTOMER_SECURITY_QUESTION_ANSWER ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_SECURITY_QUESTION_ANSWER CHECK( CUSTOMER_SECURITY_QUESTION <> '*****' ) ON INSERT VIOLATION SET CUSTOMER_SECURITY_QUESTION = DEFAULT ON UPDATE VIOLATION PRESERVE CUSTOMER_SECURITY_QUESTION ; ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_TAX_ID_CHECK CHECK( CUSTOMER_TAX_ID <> 'XXX-XX-XXXX' AND SUBSTR ( CUSTOMER_TAX_ID , 1 , 7 ) <> 'XXX-XX-' ) ON UPDATE VIOLATION PRESERVE CUSTOMER_TAX_ID ; CREATE TABLE BANK_SCHEMA.ACCOUNTS ( ACCOUNT_ID INTEGER GENERATED ALWAYS AS IDENTITY ( START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE NO CYCLE NO ORDER CACHE 20 ), CUSTOMER_ID FOR COLUMN CUSTID INTEGER NOT NULL , ACCOUNT_NUMBER FOR COLUMN ACCOUNTNO VARCHAR(50) CCSID 37 NOT NULL , ACCOUNT_NAME FOR COLUMN ACCOUNTNAM CHAR(12) CCSID 37 NOT NULL , ACCOUNT_DATE_OPENED FOR COLUMN OPENDATE DATE DEFAULT CURRENT_DATE , ACCOUNT_DATE_CLOSED FOR COLUMN CLOSEDATE DATE DEFAULT NULL , ACCOUNT_CURRENT_BALANCE FOR COLUMN ACCTBAL DECIMAL(11, 2) NOT NULL DEFAULT 0 , INSERT_TIMESTAMP FOR COLUMN INSDATE TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP IMPLICITLY HIDDEN , UPDATE_TIMESTAMP FOR COLUMN UPDDATE TIMESTAMP GENERATED ALWAYS FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP NOT NULL IMPLICITLY HIDDEN , CONSTRAINT BANK_SCHEMA.ACCOUNT_ID_PK PRIMARY KEY( ACCOUNT_ID ) );
## A
CUSTOMER_SECURITY_QUESTION_ANSWER FOR COLUMN CUSTO00011 VARCHAR(100) CCSID 37 DEFAULT NULL , INSERT_TIMESTAMP FOR COLUMN INSER00001 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP IMPLICITLY HIDDEN , UPDATE_TIMESTAMP FOR COLUMN UPDAT00001 TIMESTAMP GENERATED ALWAYS FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP NOT NULL IMPLICITLY HIDDEN , CONSTRAINT BANK_SCHEMA.CUSTOMER_ID_PK PRIMARY KEY( CUSTOMER_ID ) ) ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_LOGIN_ID_UK UNIQUE( CUSTOMER_LOGIN_ID ) ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_DRIVERS_LICENSE_CHECK CHECK( CUSTOMER_DRIVERS_LICENSE_NUMBER <> '*************' ) ON UPDATE VIOLATION PRESERVE CUSTOMER_DRIVERS_LICENSE_NUMBER ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_EMAIL_CHECK CHECK( CUSTOMER_EMAIL <> '****@****' ) ON UPDATE VIOLATION PRESERVE CUSTOMER_EMAIL ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_LOGIN_ID_CHECK CHECK( CUSTOMER_LOGIN_ID <> '*****' ) ON INSERT VIOLATION SET CUSTOMER_LOGIN_ID = DEFAULT ON UPDATE VIOLATION PRESERVE CUSTOMER_LOGIN_ID ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_SECURITY_QUESTION_CHECK CHECK( CUSTOMER_SECURITY_QUESTION_ANSWER <> '*****' ) ON INSERT VIOLATION SET CUSTOMER_SECURITY_QUESTION_ANSWER = DEFAULT ON UPDATE VIOLATION PRESERVE CUSTOMER_SECURITY_QUESTION_ANSWER ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_SECURITY_QUESTION_ANSWER CHECK( CUSTOMER_SECURITY_QUESTION <> '*****' ) ON INSERT VIOLATION SET CUSTOMER_SECURITY_QUESTION = DEFAULT ON UPDATE VIOLATION PRESERVE CUSTOMER_SECURITY_QUESTION ;
ALTER TABLE BANK_SCHEMA.CUSTOMERS ADD CONSTRAINT BANK_SCHEMA.CUSTOMER_TAX_ID_CHECK CHECK( CUSTOMER_TAX_ID <> 'XXX-XX-XXXX' AND SUBSTR ( CUSTOMER_TAX_ID , 1 , 7 ) <> 'XXX-XX-' ) ON UPDATE VIOLATION PRESERVE CUSTOMER_TAX_ID ;
CREATE TABLE BANK_SCHEMA.ACCOUNTS ( ACCOUNT_ID INTEGER GENERATED ALWAYS AS IDENTITY ( START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE NO CYCLE NO ORDER CACHE 20 ), CUSTOMER_ID FOR COLUMN CUSTID INTEGER NOT NULL , ACCOUNT_NUMBER FOR COLUMN ACCOUNTNO VARCHAR(50) CCSID 37 NOT NULL , ACCOUNT_NAME FOR COLUMN ACCOUNTNAM CHAR(12) CCSID 37 NOT NULL , ACCOUNT_DATE_OPENED FOR COLUMN OPENDATE DATE DEFAULT CURRENT_DATE , ACCOUNT_DATE_CLOSED FOR COLUMN CLOSEDATE DATE DEFAULT NULL , ACCOUNT_CURRENT_BALANCE FOR COLUMN ACCTBAL DECIMAL(11, 2) NOT NULL DEFAULT 0 , INSERT_TIMESTAMP FOR COLUMN INSDATE TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP IMPLICITLY HIDDEN , UPDATE_TIMESTAMP FOR COLUMN UPDDATE TIMESTAMP GENERATED ALWAYS FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP NOT NULL IMPLICITLY HIDDEN , CONSTRAINT BANK_SCHEMA.ACCOUNT_ID_PK PRIMARY KEY( ACCOUNT_ID ) );
ALTER TABLE BANK_SCHEMA.ACCOUNTS ADD CONSTRAINT BANK_SCHEMA.ACCOUNT_CUSTOMER_ID_FK FOREIGN KEY( CUSTOMER_ID ) REFERENCES BANK_SCHEMA.CUSTOMERS ( CUSTO00001 ) ON DELETE RESTRICT ON UPDATE RESTRICT ; ALTER TABLE BANK_SCHEMA.ACCOUNTS ADD CONSTRAINT BANK_SCHEMA.ACCOUNT_NUMBER_CHECK CHECK( ACCOUNT_NUMBER <> '*****' ) ON UPDATE VIOLATION PRESERVE ACCOUNT_NUMBER ; CREATE TABLE BANK_SCHEMA.TRANSACTIONS FOR SYSTEM NAME TRANS ( TRANSACTION_ID FOR COLUMN TRANS00001 INTEGER GENERATED ALWAYS AS IDENTITY ( START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE NO CYCLE NO ORDER CACHE 20 ), ACCOUNT_ID INTEGER NOT NULL , TRANSACTION_TYPE FOR COLUMN TRANS00002 CHAR(1) CCSID 37 NOT NULL , TRANSACTION_DATE FOR COLUMN TRANS00003 DATE NOT NULL DEFAULT CURRENT_DATE , TRANSACTION_TIME FOR COLUMN TRANS00004 TIME NOT NULL DEFAULT CURRENT_TIME , TRANSACTION_AMOUNT FOR COLUMN TRANS00005 DECIMAL(11, 2) NOT NULL , INSERT_TIMESTAMP FOR COLUMN INSER00001 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP IMPLICITLY HIDDEN , UPDATE_TIMESTAMP FOR COLUMN UPDAT00001 TIMESTAMP GENERATED ALWAYS FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP NOT NULL IMPLICITLY HIDDEN , CONSTRAINT BANK_SCHEMA.TRANSACTION_ID_PK PRIMARY KEY( TRANSACTION_ID ) ) ; ALTER TABLE BANK_SCHEMA.TRANSACTIONS ADD CONSTRAINT BANK_SCHEMA.TRANSACTIONS_ACCOUNT_ID_FK FOREIGN KEY( ACCOUNT_ID ) REFERENCES BANK_SCHEMA.ACCOUNTS ( ACCOUNT_ID ) ON DELETE RESTRICT ON UPDATE RESTRICT ; /* Permissions and Masks */ CREATE PERMISSION BANK_SCHEMA.PERMISSION1_ON_CUSTOMERS ON BANK_SCHEMA.CUSTOMERS AS C FOR ROWS WHERE ( QSYS2 . VERIFY_GROUP_FOR_USER ( SESSION_USER , 'DBE' , 'ADMIN' , 'TELLER' ) = 1 ) OR ( QSYS2 . VERIFY_GROUP_FOR_USER ( SESSION_USER , 'CUSTOMER' ) = 1 AND ( C . CUSTOMER_LOGIN_ID = BANK_SCHEMA . CUSTOMER_LOGIN_ID ) ) ENFORCED FOR ALL ACCESS ENABLE ; CREATE MASK BANK_SCHEMA.MASK_EMAIL_ON_CUSTOMERS ON BANK_SCHEMA.CUSTOMERS AS C FOR COLUMN CUSTOMER_EMAIL RETURN CASE WHEN QSYS2 . VERIFY_GROUP_FOR_USER ( SESSION_USER , 'ADMIN' ) = 1 THEN C . CUSTOMER_EMAIL WHEN QSYS2 . VERIFY_GROUP_FOR_USER ( SESSION_USER , 'CUSTOMER' ) = 1 THEN C . CUSTOMER_EMAIL ELSE '****@****' END ENABLE ; CREATE MASK BANK_SCHEMA.MASK_TAX_ID_ON_CUSTOMERS ON BANK_SCHEMA.CUSTOMERS AS C FOR COLUMN CUSTOMER_TAX_ID RETURN CASE WHEN QSYS2 . VERIFY_GROUP_FOR_USER ( SESSION_USER , 'ADMIN' ) = 1
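A minimal sketch of exercising the definitions above (group names are taken from the script; the commented results are assumptions about a user profile that belongs to the TELLER group only):

-- Returns 1 when SESSION_USER belongs to one of the named group profiles.
VALUES QSYS2.VERIFY_GROUP_FOR_USER(SESSION_USER, 'DBE', 'ADMIN', 'TELLER');

-- A TELLER sees rows through PERMISSION1_ON_CUSTOMERS, but is neither
-- ADMIN nor CUSTOMER, so MASK_EMAIL_ON_CUSTOMERS returns the masked value.
SELECT CUSTOMER_NAME, CUSTOMER_EMAIL
FROM BANK_SCHEMA.CUSTOMERS; -- CUSTOMER_EMAIL is returned as '****@****'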
@ -2624,7 +2620,9 @@ http://www-01.ibm.com/support/knowledgecenter/ssw_ibm_i_72/rzarl/rzarlkickoff.h
These websites are relevant as further information sources:
GLYPH<SM590000> Database programming topic of the IBM i 7.2 IBM Knowledge Center: http://www-01.ibm.com/support/knowledgecenter/ssw_ibm_i_72/rzahg/rzahgdbp.htm?l ang=en
GLYPH<SM590000> Database programming topic of the IBM i 7.2 IBM Knowledge Center:
http://www-01.ibm.com/support/knowledgecenter/ssw_ibm_i_72/rzahg/rzahgdbp.htm?l ang=en
GLYPH<SM590000> Identity Theft Resource Center
@ -2636,7 +2634,13 @@ http://www.ponemon.org/
## Help from IBM
IBM Support and downloads ibm.com /support IBM Global Services ibm.com /services
IBM Support and downloads
ibm.com /support
IBM Global Services
ibm.com /services
Back cover
@ -2660,7 +2664,7 @@ REDP-5110-00
<!-- image -->
INTERNATIONAL TECHNICAL SUPPORT ORGANIZATION
## INTERNATIONAL TECHNICAL SUPPORT ORGANIZATION
## BUILDING TECHNICAL INFORMATION BASED ON PRACTICAL EXPERIENCE
File diff suppressed because one or more lines are too long

View File
@ -4,26 +4,44 @@
<location><page_1><loc_67><loc_90><loc_93><loc_96></location>
</figure>
<subtitle-level-1><location><page_1><loc_7><loc_75><loc_88><loc_86></location>IBM Cloud Pak for Data on IBM Z</subtitle-level-1>
<paragraph><location><page_1><loc_7><loc_60><loc_20><loc_62></location>Jasmeet Bhatia</paragraph>
<paragraph><location><page_1><loc_7><loc_57><loc_20><loc_59></location>Ravi Gummadi</paragraph>
<paragraph><location><page_1><loc_7><loc_51><loc_21><loc_52></location>Srirama Sharma</paragraph>
<figure>
<location><page_1><loc_0><loc_11><loc_100><loc_64></location>
<location><page_1><loc_6><loc_22><loc_20><loc_26></location>
</figure>
<figure>
<location><page_3><loc_5><loc_70><loc_39><loc_91></location>
<location><page_1><loc_7><loc_18><loc_23><loc_21></location>
</figure>
<subtitle-level-1><location><page_3><loc_11><loc_65><loc_48><loc_68></location>Executive overview</subtitle-level-1>
<figure>
<location><page_1><loc_6><loc_13><loc_17><loc_16></location>
</figure>
<figure>
<location><page_1><loc_7><loc_3><loc_21><loc_8></location>
</figure>
<figure>
<location><page_1><loc_24><loc_13><loc_99><loc_62></location>
</figure>
<figure>
<location><page_1><loc_76><loc_3><loc_93><loc_7></location>
</figure>
<figure>
<location><page_3><loc_5><loc_70><loc_40><loc_91></location>
</figure>
<subtitle-level-1><location><page_3><loc_11><loc_65><loc_48><loc_67></location>Executive overview</subtitle-level-1>
<paragraph><location><page_3><loc_22><loc_50><loc_89><loc_60></location>Most industries are susceptible to fraud, which poses a risk to both businesses and consumers. According to The National Health Care Anti-Fraud Association, health care fraud alone costs the nation around $68 billion annually.$^{1}$ This statistic does not include the numerous other industries where fraudulent activities occur daily. In addition, the growing amount of data that enterprises own makes it difficult for them to detect fraud. Businesses can benefit by using an analytical platform to fully integrate their data with artificial intelligence (AI) technology.</paragraph>
<paragraph><location><page_3><loc_22><loc_41><loc_89><loc_48></location>With IBM Cloud Pak® for Data on IBM Z, enterprises can modernize their data infrastructure, develop and deploy machine learning (ML) and AI models, and instantiate highly efficient analytics deployments on IBM LinuxONE. Enterprises can create cutting-edge, intelligent, and interactive applications with embedded AI, colocate data with commercial applications, and use AI to make inferences.</paragraph>
<paragraph><location><page_3><loc_22><loc_32><loc_89><loc_39></location>This IBM Redguide publication presents a high-level overview of IBM Z. It describes IBM Cloud Pak for Data (CP4D) on IBM Z and IBM LinuxONE, the different features that are supported on the platform, and how the associated features can help enterprise customers in building AI and ML models by using core transactional data, which results in decreased latency and increased throughput.</paragraph>
<paragraph><location><page_3><loc_22><loc_22><loc_89><loc_31></location>This publication highlights real-time CP4D on IBM Z use cases. Real-time Clearing and Settlement Transactions, Trustworthy AI and its Role in Day-To-Day Monitoring, and the Prevention of Retail Crimes are use cases that are described in this publication. Using CP4D on IBM Z and LinuxONE, this publication shows how businesses can implement a highly efficient analytics deployment that minimizes latency, cost inefficiencies, and potential security exposures that are connected with data transportation.</paragraph>
<subtitle-level-1><location><page_4><loc_11><loc_89><loc_35><loc_91></location>IBM Z: An overview</subtitle-level-1>
<paragraph><location><page_3><loc_22><loc_22><loc_89><loc_30></location>This publication highlights real-time CP4D on IBM Z use cases. Real-time Clearing and Settlement Transactions, Trustworthy AI and its Role in Day-To-Day Monitoring, and the Prevention of Retail Crimes are use cases that are described in this publication. Using CP4D on IBM Z and LinuxONE, this publication shows how businesses can implement a highly efficient analytics deployment that minimizes latency, cost inefficiencies, and potential security exposures that are connected with data transportation.</paragraph>
<subtitle-level-1><location><page_4><loc_10><loc_89><loc_35><loc_91></location>IBM Z: An overview</subtitle-level-1>
<paragraph><location><page_4><loc_22><loc_80><loc_88><loc_87></location>Ever wonder how many transactions a bank processes per day? What about the pace at which these transactions happen? According to an IBM® report, 44 of the world's top 50 banks use IBM Z mainframes for these daily transactions.$^{2}$ IBM Z is a platform that is designed for voluminous data, maximum security, real-time transaction analysis, and cost efficiency.</paragraph>
<paragraph><location><page_4><loc_22><loc_75><loc_84><loc_78></location>The most recent platform for IBM Z is IBM z16™. The IBM z16 supports the following features:</paragraph>
<paragraph><location><page_4><loc_22><loc_73><loc_42><loc_75></location>GLYPH<SM590000> On-chip AI acceleration</paragraph>
<paragraph><location><page_4><loc_22><loc_73><loc_42><loc_74></location>GLYPH<SM590000> On-chip AI acceleration</paragraph>
<paragraph><location><page_4><loc_22><loc_71><loc_47><loc_72></location>GLYPH<SM590000> Quantum-safe crypto discovery</paragraph>
<paragraph><location><page_4><loc_22><loc_69><loc_40><loc_70></location>GLYPH<SM590000> Simplified compliance</paragraph>
<paragraph><location><page_4><loc_22><loc_67><loc_37><loc_68></location>GLYPH<SM590000> Flexible capacity</paragraph>
<paragraph><location><page_4><loc_22><loc_65><loc_46><loc_66></location>GLYPH<SM590000> Modernization of applications</paragraph>
<paragraph><location><page_4><loc_22><loc_62><loc_34><loc_64></location>GLYPH<SM590000> Sustainability</paragraph>
<paragraph><location><page_4><loc_22><loc_63><loc_34><loc_64></location>GLYPH<SM590000> Sustainability</paragraph>
<paragraph><location><page_4><loc_22><loc_58><loc_85><loc_61></location>With these features, enterprises can upgrade applications while preserving secure and resilient data.</paragraph>
<paragraph><location><page_4><loc_22><loc_55><loc_71><loc_57></location>To learn more about these features, see the IBM z16 product page.</paragraph>
<paragraph><location><page_4><loc_22><loc_53><loc_68><loc_54></location>Figure 1 on page 3 shows a picture of the IBM z16 mainframe.</paragraph>
@ -32,19 +50,19 @@
<location><page_5><loc_22><loc_44><loc_71><loc_90></location>
<caption>Figure 1 IBM z16</caption>
</figure>
<subtitle-level-1><location><page_5><loc_11><loc_38><loc_58><loc_40></location>IBM z16 and IBM LinuxONE Emperor 4 features</subtitle-level-1>
<subtitle-level-1><location><page_5><loc_10><loc_38><loc_58><loc_40></location>IBM z16 and IBM LinuxONE Emperor 4 features</subtitle-level-1>
<paragraph><location><page_5><loc_22><loc_29><loc_89><loc_36></location>IBM Z is based on enterprise mainframe technology. Starting with transaction-based workloads and databases, IBM Z has undergone tremendous transformations in its system design over many generations to build servers that cater to Linux-based workloads and security with a cyberresilient system, and to support quantum computing and modernization by using a hybrid cloud with a focus on data and AI.</paragraph>
<paragraph><location><page_6><loc_22><loc_88><loc_89><loc_91></location>Figure 2 provides a snapshot of the IBM Z processor roadmap, which depicts the journey of transformation and improvement.</paragraph>
<caption><location><page_6><loc_11><loc_51><loc_35><loc_52></location>Figure 2 IBM Z: Processor roadmap</caption>
<caption><location><page_6><loc_10><loc_51><loc_35><loc_52></location>Figure 2 IBM Z: Processor roadmap</caption>
<figure>
<location><page_6><loc_10><loc_52><loc_90><loc_87></location>
<location><page_6><loc_10><loc_53><loc_90><loc_87></location>
<caption>Figure 2 IBM Z: Processor roadmap</caption>
</figure>
<paragraph><location><page_6><loc_22><loc_38><loc_89><loc_49></location>The IBM z16 and IBM LinuxONE Emperor 4 are the latest members of the IBM Z family, and they are developed with a 'built to build' focus to provide a powerful, cyberresilient, open, and secure platform for business with an extra focus on sustainability to help build sustainable data centers. Although the z16 server can host both IBM z/OS® and Linux workloads, LinuxONE Emperor 4 is built to host Linux only workloads with a focus on consolidation and resiliency. Depending on the workload, consolidation from numerous x86 servers into a LinuxONE Emperor 4 can help reduce energy consumption by 75% and data center floor space by 50%, which helps to achieve the sustainability goals of the organization.</paragraph>
<paragraph><location><page_6><loc_22><loc_29><loc_89><loc_36></location>Figure 3 on page 5 shows a summary of the system design of IBM LinuxONE Emperor 4 with the IBM Telum™ processor. The IBM Telum processor chip is designed to run enterprise applications efficiently where their data resides to embed AI with super low latency. Higher bandwidth and I/O rates are supported through FCP Express cards with an endpoint security solution. The memory subsystem supports up to 40 TB of memory.</paragraph>
<caption><location><page_7><loc_11><loc_54><loc_49><loc_56></location>Figure 3 System design of IBM z16 LinuxONE Emperor 4</caption>
<figure>
<location><page_7><loc_71><loc_56><loc_89><loc_83></location>
<location><page_7><loc_10><loc_56><loc_90><loc_90></location>
<caption>Figure 3 System design of IBM z16 LinuxONE Emperor 4</caption>
</figure>
<paragraph><location><page_7><loc_22><loc_45><loc_89><loc_53></location>The IBM z16 and IBM LinuxONE Emperor 4 servers are built with 7-nm technology at a 5.2 GHz speed. They consist of four dual-chip modules (DCMs) per central processor complex (CPC) drawer, each of which is built with two 8-core Telum processor chips that have "first in the industry" on-chip acceleration for mid-transaction, real-time AI inferencing, which supports many different use cases, including fraud detection.</paragraph>
@ -52,30 +70,30 @@
<paragraph><location><page_8><loc_22><loc_88><loc_88><loc_91></location>Figure 4 provides more information about the features of AI Accelerator integration with the IBM Z processor cores.</paragraph>
<caption><location><page_8><loc_10><loc_53><loc_63><loc_54></location>Figure 4 IBM z16 on-chip AI Accelerator integration with IBM Z processor cores</caption>
<figure>
<location><page_8><loc_11><loc_54><loc_90><loc_86></location>
<location><page_8><loc_10><loc_54><loc_90><loc_87></location>
<caption>Figure 4 IBM z16 on-chip AI Accelerator integration with IBM Z processor cores</caption>
</figure>
<paragraph><location><page_8><loc_22><loc_41><loc_89><loc_51></location>The IBM z16 and IBM LinuxONE Emperor 4 server platforms are built with the hardware features that are shown in Figure 4 with data and AI workloads in mind. Regardless of where the ML and deep learning (DL) frameworks are used to build and train data and AI models, the inferencing on existing enterprise application data can happen alongside currently running enterprise business applications. CP4D 4.6 supports Tensorflow and IBM Snap ML frameworks, which are optimized to use the on-chip AI Accelerator during inferencing. Support for various other frameworks is planned for future releases.</paragraph>
<paragraph><location><page_8><loc_22><loc_37><loc_89><loc_39></location>Figure 5 on page 7 shows the seamless integration of AI into existing enterprise workloads on the IBM z16 while leveraging the underlying hardware capabilities.</paragraph>
<caption><location><page_9><loc_11><loc_61><loc_31><loc_62></location>Figure 5 Seamless integration</caption>
<caption><location><page_9><loc_10><loc_61><loc_31><loc_62></location>Figure 5 Seamless integration</caption>
<figure>
<location><page_9><loc_10><loc_62><loc_90><loc_90></location>
<location><page_9><loc_10><loc_62><loc_89><loc_90></location>
<caption>Figure 5 Seamless integration</caption>
</figure>
<subtitle-level-1><location><page_9><loc_11><loc_55><loc_56><loc_57></location>What is Cloud Pak for Data on IBM Z</subtitle-level-1>
<subtitle-level-1><location><page_9><loc_10><loc_55><loc_56><loc_57></location>What is Cloud Pak for Data on IBM Z</subtitle-level-1>
<paragraph><location><page_9><loc_22><loc_47><loc_89><loc_53></location>IBM Cloud Pak for Data allows enterprises to simplify, unify, and automate the delivery of data and AI. It categorizes the activities within the journey to AI as four rungs of the AI Ladder: Collect, Organize, Analyze, and Infuse. For more information about each of the AI Ladder rungs, see Become Data Driven with IBM Z Infused Data Fabric , REDP-5680.</paragraph>
<paragraph><location><page_9><loc_22><loc_31><loc_89><loc_46></location>CP4D on IBM Z provides enterprises with a resilient and secure private cloud platform. You can use it to create ML and AI models that can be included in modern intelligent applications, and to construct applications that work with mission-critical data. With CP4D on IBM Z, enterprises can lower data movement latency, cost inefficiencies, and potential security exposures. Enterprises can safely store and access their most important company data, and leverage their current infrastructure by using cutting-edge hybrid cloud applications. Enterprises can combine their current database applications without any rewrites, which results in reduced cost and complexity. Lastly, by using CP4D on IBM Z, enterprises can update their database infrastructure to benefit from easier management, a quicker time to value, and lower operating expenses.</paragraph>
<paragraph><location><page_10><loc_22><loc_79><loc_89><loc_91></location>Figure 6 shows a solution overview of CP4D. The infrastructure alternatives are shown at the bottom, and they include IBM Z and LinuxONE. They all leverage Red Hat OpenShift. Common Foundational Services come next, which offer clarity throughout the data and AI lifecycle, that is, from user access management to monitoring and service provisioning. A high-level view of the services is shown in the middle section. The services have several different capabilities that span the AI hierarchy. The platform can be expanded, and it offers a seamless user experience for all distinct personas across the AI lifecycle, from data gathering through AI infusion.</paragraph>
<caption><location><page_10><loc_11><loc_38><loc_43><loc_39></location>Figure 6 Solution overview of Cloud Pak for Data</caption>
<figure>
<location><page_10><loc_11><loc_39><loc_90><loc_77></location>
<location><page_10><loc_10><loc_39><loc_90><loc_77></location>
<caption>Figure 6 Solution overview of Cloud Pak for Data</caption>
</figure>
<paragraph><location><page_10><loc_22><loc_35><loc_85><loc_36></location>We highlight the four main pillars that make IBM Z the correct infrastructure for CP4D:</paragraph>
<paragraph><location><page_10><loc_22><loc_33><loc_42><loc_34></location>GLYPH<SM590000> Performance and Scale</paragraph>
<paragraph><location><page_10><loc_22><loc_31><loc_42><loc_32></location>GLYPH<SM590000> Embedded Accelerators</paragraph>
<paragraph><location><page_10><loc_22><loc_28><loc_43><loc_30></location>GLYPH<SM590000> Reliability and Availability</paragraph>
<paragraph><location><page_10><loc_22><loc_26><loc_44><loc_28></location>GLYPH<SM590000> Security and Governance.</paragraph>
<paragraph><location><page_10><loc_22><loc_26><loc_44><loc_27></location>GLYPH<SM590000> Security and Governance.</paragraph>
<paragraph><location><page_10><loc_22><loc_13><loc_89><loc_25></location>From a performance perspective, CP4D on IBM Z provides your data and AI with high transaction processing and a powerful infrastructure. From the embedded accelerators perspective, CP4D on IBM Z can investigate each transaction thanks to a cutting-edge DL inference technology even in the most demanding, sensitive, and latency-prone real-time workloads. From a reliability perspective, CP4D on IBM Z provides high availability and resiliency. Lastly from the security perspective, CP4D on IBM Z is suitable for protecting sensitive data and AI models for enterprises in highly regulated industries or those industries that are worried about security.</paragraph>
<subtitle-level-1><location><page_11><loc_11><loc_89><loc_85><loc_91></location>Cloud Pak for Data capabilities on IBM Z and IBM LinuxONE</subtitle-level-1>
<paragraph><location><page_11><loc_22><loc_81><loc_89><loc_87></location>With CP4D on IBM Z and IBM LinuxONE, users can develop, train, and deploy AI and ML models. Users can accomplish this task by using the CP4D IBM Watsonfi Studio and IBM Watson Machine Learning (WLM) services. By using these two fundamental services, users can accomplish the following tasks:</paragraph>
@ -86,7 +104,7 @@
<paragraph><location><page_11><loc_22><loc_70><loc_89><loc_72></location>GLYPH<SM590000> Create, build, evaluate, and deploy analytics and ML models with trust and transparency.</paragraph>
<paragraph><location><page_11><loc_22><loc_68><loc_82><loc_70></location>GLYPH<SM590000> Leverage the AI Integrated Accelerator for TensorFlow 2.7.2 and Snap ML 1.9.</paragraph>
<paragraph><location><page_11><loc_22><loc_64><loc_88><loc_67></location>For more information about the specifics of these capabilities, see Capabilities on Linux on IBM Z and IBM LinuxONE.</paragraph>
<subtitle-level-1><location><page_11><loc_11><loc_59><loc_41><loc_61></location>Open-source ecosystem</subtitle-level-1>
<subtitle-level-1><location><page_11><loc_11><loc_59><loc_41><loc_60></location>Open-source ecosystem</subtitle-level-1>
<paragraph><location><page_11><loc_22><loc_48><loc_89><loc_56></location>These days, innovation and product development are not limited to closed doors within an organization. In any industry sector, the solutions include a mix of proprietary code addressing the core business solution that is supported or integrated into other software components from open source. In some cases, enterprises' business solutions also are built from open-source community offerings. Thus, open-source software becomes an important ingredient in modern-day solution building.</paragraph>
<paragraph><location><page_11><loc_22><loc_34><loc_89><loc_46></location>IBM actively participates in various open-source communities as part of steering boards defining the roadmap of the community, and also in contributing code to make the community a better place for everyone to participate. Red Hat also actively participates in various open-source communities and makes extensive contributions. In open-source communities, although most open-source development happens on x86 / amd64 or the Intel architecture, the same open-source software is used by other architectures, such as IBM Power (ppc64le), IBM Z and IBM LinuxONE (s390x), ARM, and Sparc. So, the availability of an open-source ecosystem on any architecture is key and critical to business.</paragraph>
<paragraph><location><page_11><loc_22><loc_27><loc_88><loc_33></location>On IBM Z and IBM LinuxONE (s390x) architecture, there is a huge open-source support ecosystem that ranges from operating systems such as Linux; application run times; cloud and container services; DevOps and automation; big data; observability; analytics; databases; and storage. The ecosystem on IBM Z and IBM LinuxONE is growing.</paragraph>
@ -95,8 +113,8 @@
<paragraph><location><page_12><loc_22><loc_82><loc_89><loc_91></location>CP4D includes a mix of open-source and proprietary data and AI runtime databases; open-source run times like Python; open-source data platforms like Anaconda; ML and DL frameworks like Pytorch and Tensorflow; and thousands of reusable Python packages. All of them are available and supported on s390x architecture to provide seamless parity with x86 architecture and a seamless experience for enterprise data scientists, architects, and data and AI solution developers on IBM Z and IBM LinuxONE platforms.</paragraph>
<paragraph><location><page_12><loc_22><loc_73><loc_89><loc_81></location>Anaconda is one of the open-source data platforms that provide Python and R based data science ML frameworks; analytics and data visualization tools; and open-source data science tools and libraries like Conda, XGBoost, and SciKit-Learn. Anaconda runs natively on Linux on IBM Z and IBM LinuxONE, and on IBM z/OS Container Extensions (zcX) on z/OS. For more information, see Announcing Anaconda for Linux on IBM Z and LinuxONE.</paragraph>
<paragraph><location><page_12><loc_22><loc_63><loc_89><loc_72></location>In addition to strong, open-source ecosystem support for application development on Linux and enterprise operating systems, a new generation of IBM Z and IBM LinuxONE servers (IBM z16™) also have strong platform support, and AI acceleration capabilities that can be leveraged by open-source software to perform better on the server infrastructure. For example, the recently released CP4D 4.6 has Tensorflow and IBM SnapML frameworks that leverage the AI accelerators when running on an IBM z16 server.</paragraph>
<paragraph><location><page_12><loc_22><loc_59><loc_85><loc_62></location>So, to summarize, there is a huge, growing data and AI open source ecosystem that is supported and optimized on IBM Z and IBM LinuxONE servers.</paragraph>
<subtitle-level-1><location><page_12><loc_11><loc_53><loc_31><loc_55></location>Why AI on IBM Z</subtitle-level-1>
<paragraph><location><page_12><loc_22><loc_59><loc_85><loc_61></location>So, to summarize, there is a huge, growing data and AI open source ecosystem that is supported and optimized on IBM Z and IBM LinuxONE servers.</paragraph>
<subtitle-level-1><location><page_12><loc_10><loc_53><loc_31><loc_55></location>Why AI on IBM Z</subtitle-level-1>
<paragraph><location><page_12><loc_22><loc_42><loc_89><loc_51></location>Data and AI play a major role in the modernization story that enables the digital transformation journey of every organization. Many organizations recognize the business value of infusing AI into their infrastructure. CP4D provides the cloud-native solution to put your data to work. With CP4D, all your data users can collaborate from a single, unified interface that supports many services that work together, including collecting data, organizing the data, analyzing the data, and infusing AI.</paragraph>
<paragraph><location><page_12><loc_22><loc_30><loc_89><loc_41></location>Traditional ML models power most of today's ML applications in business and among AI practitioners. CP4D supports traditional ML frameworks for training and inferencing, such as Scikit-learn, Snap ML, and XGBoost. Snap ML is a library that provides high-speed training and inferencing of ML models that leverage the AI accelerator while running on an IBM z16 (Linux on IBM Z). CP4D supports DL frameworks such as TensorFlow and PyTorch. TensorFlow is a DL framework that leverages the AI accelerator while running on an IBM z16 (Linux on IBM Z).</paragraph>
<paragraph><location><page_12><loc_22><loc_23><loc_89><loc_29></location>Figure 7 on page 11 provides an overview of the components that are supported on CP4D on IBM Z. You can leverage Watson Studio for model building, training, and validation, and WML for deployment of the model. Eventually, applications can use the AI inference endpoint to score the model.</paragraph>
@ -119,27 +137,29 @@
<paragraph><location><page_14><loc_22><loc_87><loc_86><loc_91></location>For the health care industry, medical image processing (such as MRIs and x-rays), skin cancer detection, and patient monitoring activities such as infant motion analysis are important.</paragraph>
<paragraph><location><page_14><loc_22><loc_81><loc_89><loc_85></location>For the airline industry, processes such as air traffic management, flight management systems, and flight maintenance predictions are use cases that are ideal candidates for using AI on IBM Z.</paragraph>
<paragraph><location><page_14><loc_22><loc_78><loc_68><loc_79></location>In the following sections, we describe the following use cases:</paragraph>
<paragraph><location><page_14><loc_22><loc_71><loc_89><loc_77></location>GLYPH<SM590000> "Use case 1: Responsible AI augmented with risk and regulatory compliance" on page 12 AI model lifecycle governance, risk management, and regulatory compliance are key to the success of the enterprises. It is imperative to adopt a typical AI model lifecycle to protect new end-to-end risks.</paragraph>
<paragraph><location><page_14><loc_22><loc_76><loc_89><loc_77></location>GLYPH<SM590000> "Use case 1: Responsible AI augmented with risk and regulatory compliance" on page 12</paragraph>
<paragraph><location><page_14><loc_25><loc_71><loc_89><loc_75></location>AI model lifecycle governance, risk management, and regulatory compliance are key to the success of the enterprises. It is imperative to adopt a typical AI model lifecycle to protect new end-to-end risks.</paragraph>
<paragraph><location><page_14><loc_22><loc_69><loc_66><loc_70></location>GLYPH<SM590000> "Use case 2: Credit default risk assessment" on page 22</paragraph>
<paragraph><location><page_14><loc_25><loc_62><loc_89><loc_68></location>Core banking solutions running on IBM Z that are involved in processing inbound transactions need real-time fraud detection to prevent fraud. Other types of possible use cases might be credit risk analysis, anti-money laundering, loan approval, fraud detection in payments, and instant payments.</paragraph>
<paragraph><location><page_14><loc_22><loc_60><loc_61><loc_61></location>GLYPH<SM590000> "Use case 3: Clearing and settlement" on page 25</paragraph>
<paragraph><location><page_14><loc_25><loc_56><loc_88><loc_59></location>The use of AI can help to predict which trades or transactions have high risk exposures, and propose solutions for a more efficient settlement process.</paragraph>
<paragraph><location><page_14><loc_22><loc_50><loc_87><loc_55></location>GLYPH<SM590000> "Use case 4: Remaining Useful Life of an aircraft engine" on page 27 We describe how AI can help to avoid unplanned aircraft downtime by determining the remaining time or cycles that an aircraft engine is likely to operate before failure.</paragraph>
<paragraph><location><page_14><loc_22><loc_47><loc_88><loc_50></location>GLYPH<SM590000> "Use case 5: AI-powered video analytics on an infant's motions for health prediction" on page 30</paragraph>
<paragraph><location><page_14><loc_22><loc_54><loc_74><loc_55></location>GLYPH<SM590000> "Use case 4: Remaining Useful Life of an aircraft engine" on page 27</paragraph>
<paragraph><location><page_14><loc_25><loc_50><loc_87><loc_53></location>We describe how AI can help to avoid unplanned aircraft downtime by determining the remaining time or cycles that an aircraft engine is likely to operate before failure.</paragraph>
<paragraph><location><page_14><loc_22><loc_47><loc_88><loc_49></location>GLYPH<SM590000> "Use case 5: AI-powered video analytics on an infant's motions for health prediction" on page 30</paragraph>
<paragraph><location><page_14><loc_25><loc_43><loc_89><loc_46></location>In this section, we describe how AI can predict an infant's health conditions by monitoring real-time body movements.</paragraph>
<subtitle-level-1><location><page_14><loc_11><loc_35><loc_89><loc_40></location>Use case 1: Responsible AI augmented with risk and regulatory compliance</subtitle-level-1>
<subtitle-level-1><location><page_14><loc_10><loc_35><loc_89><loc_39></location>Use case 1: Responsible AI augmented with risk and regulatory compliance</subtitle-level-1>
<paragraph><location><page_14><loc_22><loc_27><loc_89><loc_33></location>Advancement in AI is changing the world, and organizations must adopt AI to embrace new challenges daily. Many enterprises see tremendous value in adopting AI and ML technologies while establishing organizational trust in the models, underlying data, and the process to be followed. Managing an AI model lifecycle can be a daunting task.</paragraph>
<paragraph><location><page_14><loc_22><loc_23><loc_89><loc_26></location>How mature is your AI governance? In this section, we provide a use case demonstrating the trustworthiness of AI and its importance in daily monitoring.</paragraph>
<subtitle-level-1><location><page_14><loc_11><loc_19><loc_31><loc_21></location>Industry challenges</subtitle-level-1>
<subtitle-level-1><location><page_14><loc_10><loc_19><loc_31><loc_20></location>Industry challenges</subtitle-level-1>
<paragraph><location><page_14><loc_22><loc_16><loc_83><loc_17></location>Here are the three main reasons why organizations struggle with the adoption of AI:</paragraph>
<paragraph><location><page_14><loc_22><loc_14><loc_48><loc_15></location>GLYPH<SM590000> Scaling with growing regulations</paragraph>
<paragraph><location><page_14><loc_22><loc_12><loc_71><loc_13></location>GLYPH<SM590000> Lack of confidence in operationalized AI (making responsible AI)</paragraph>
<paragraph><location><page_14><loc_22><loc_9><loc_76><loc_11></location>GLYPH<SM590000> Challenges around managing the risk throughout the entire AI workflow</paragraph>
<paragraph><location><page_14><loc_22><loc_10><loc_76><loc_11></location>GLYPH<SM590000> Challenges around managing the risk throughout the entire AI workflow</paragraph>
<subtitle-level-1><location><page_15><loc_22><loc_90><loc_53><loc_91></location>Scaling with growing regulations</subtitle-level-1>
<paragraph><location><page_15><loc_22><loc_80><loc_88><loc_89></location>Laws and regulations in the data and AI space are accelerating, and many countries are proposing strict AI policies. Countries are monitoring enterprises' adherence to these policies and imposing fines for any violations. Responding to these regulations is challenging for global organizations where multiple regulations apply. For enterprises, it is important to adopt AI policies when there is change, and to validate explainable models to protect against discrimination.</paragraph>
<subtitle-level-1><location><page_15><loc_22><loc_77><loc_37><loc_78></location>Responsible AI</subtitle-level-1>
<paragraph><location><page_15><loc_22><loc_71><loc_89><loc_76></location>Responsible AI protects against loss of data privacy, and reduced customer loyalty and trust. A data scientist cannot maximize accuracy and model performance above all other concerns. Practicing responsible AI is a best practice, and you must establish protection and validation to ensure that any models that are placed into production are fair and explainable.</paragraph>
<subtitle-level-1><location><page_15><loc_22><loc_67><loc_60><loc_69></location>Risks throughout the entire AI workflow</subtitle-level-1>
<subtitle-level-1><location><page_15><loc_22><loc_67><loc_59><loc_69></location>Risks throughout the entire AI workflow</subtitle-level-1>
<paragraph><location><page_15><loc_22><loc_65><loc_64><loc_67></location>Organizations need to mitigate risk of the following items:</paragraph>
<paragraph><location><page_15><loc_22><loc_63><loc_63><loc_65></location>GLYPH<SM590000> Deciding not to use certain technologies or practices</paragraph>
<paragraph><location><page_15><loc_22><loc_61><loc_74><loc_62></location>GLYPH<SM590000> Using personal information when needed and with a user's consent</paragraph>
@ -152,11 +172,11 @@
<paragraph><location><page_16><loc_22><loc_85><loc_89><loc_91></location>For example, a business can start testing a model before production for fairness metrics. For this task, enterprises need an end-to-end workflow with approvals to mitigate these risks and increase the scale of AI investments, as shown in Figure 8, which presents a typical AI model lifecycle in an enterprise.</paragraph>
<caption><location><page_16><loc_10><loc_57><loc_34><loc_58></location>Figure 8 Typical AI model lifecycle</caption>
<figure>
<location><page_16><loc_10><loc_58><loc_89><loc_83></location>
<location><page_16><loc_10><loc_58><loc_89><loc_84></location>
<caption>Figure 8 Typical AI model lifecycle</caption>
</figure>
<paragraph><location><page_16><loc_22><loc_46><loc_88><loc_55></location>Due to regulations, more stakeholders adopt the typical AI model lifecycle to protect their brand from new end-to-end risks. To ensure various aspects of both regulatory compliance and security, the personas that must be involved include the chief financial officer (CFO), chief marketing officer (CMO), chief data officer (CDO), HR, and chief regulatory officer (CRO), along with the data engineers, data scientists, and business analysts, who build AI workflows.</paragraph>
<subtitle-level-1><location><page_16><loc_11><loc_42><loc_46><loc_44></location>IBM governance solution for IBM Z</subtitle-level-1>
<subtitle-level-1><location><page_16><loc_10><loc_42><loc_46><loc_44></location>IBM governance solution for IBM Z</subtitle-level-1>
<paragraph><location><page_16><loc_22><loc_38><loc_88><loc_41></location>AI model lifecycle governance, risk management, and regulatory compliance are key to the success of enterprises.</paragraph>
<paragraph><location><page_16><loc_22><loc_23><loc_89><loc_36></location>AI governance is a comprehensive framework that uses a set of automated processes, methodologies, and tools to manage an organization's use of AI. Consistent principles guiding the design, development, deployment, and monitoring of models are critical in driving responsible and trustworthy AI. AI governance includes processes that trace and record the origin of data, models (including associated metadata), and pipelines for audits. The details of entry should include the techniques that trained each model, the hyperparameters that were used, and the metrics from testing phases. These details provide increased transparency into the model's behavior throughout the lifecycle, the data that was influential in its development, and the possible risks.</paragraph>
<paragraph><location><page_16><loc_22><loc_16><loc_89><loc_21></location>In a world where trust, transparency and explainable AI matters, every organization wants compliance along with the comfort of understanding how analytic insights and decisions are made. The following sections describe some of the principles and organizational requirements for AI governance.</paragraph>
@ -165,31 +185,31 @@
<paragraph><location><page_17><loc_22><loc_83><loc_85><loc_84></location>GLYPH<SM590000> Monitor, catalog, and govern AI models from anywhere throughout the AI lifecycle.</paragraph>
<paragraph><location><page_17><loc_22><loc_81><loc_70><loc_82></location>GLYPH<SM590000> Automate the capture of model metadata for report generation.</paragraph>
<paragraph><location><page_17><loc_22><loc_78><loc_58><loc_80></location>GLYPH<SM590000> Drive transparent and explainable AI at scale.</paragraph>
<paragraph><location><page_17><loc_22><loc_76><loc_87><loc_78></location>GLYPH<SM590000> Increase accuracy of predictions by identifying how AI is used and where it is lagging.</paragraph>
<paragraph><location><page_17><loc_22><loc_76><loc_87><loc_77></location>GLYPH<SM590000> Increase accuracy of predictions by identifying how AI is used and where it is lagging.</paragraph>
<subtitle-level-1><location><page_17><loc_22><loc_73><loc_38><loc_75></location>Risk management</subtitle-level-1>
<paragraph><location><page_17><loc_22><loc_70><loc_89><loc_73></location>Risk management is used in IBM AI governance to identify, manage, monitor, and report on risk and compliance initiatives at scale:</paragraph>
<paragraph><location><page_17><loc_22><loc_68><loc_81><loc_69></location>GLYPH<SM590000> Automate facts and workflow management to comply with business standards.</paragraph>
<paragraph><location><page_17><loc_22><loc_66><loc_74><loc_67></location>GLYPH<SM590000> Use dynamic dashboards for clear and concise customizable results.</paragraph>
<paragraph><location><page_17><loc_22><loc_63><loc_72><loc_65></location>GLYPH<SM590000> Enhanced collaboration across multiple regions and geographies.</paragraph>
<paragraph><location><page_17><loc_22><loc_64><loc_72><loc_65></location>GLYPH<SM590000> Enhanced collaboration across multiple regions and geographies.</paragraph>
<subtitle-level-1><location><page_17><loc_22><loc_61><loc_42><loc_62></location>Regulatory compliance</subtitle-level-1>
<paragraph><location><page_17><loc_22><loc_54><loc_89><loc_60></location>Regulatory compliance is a set of rules that organizations must follow to protect sensitive information and ensure human safety. Any business that works with digital assets, consumer data, health regulations, employee safety, and private communications is subject to regulatory compliance.$^{3}$ The IBM AI governance solution for IBM Z includes the following tasks:</paragraph>
<paragraph><location><page_17><loc_22><loc_52><loc_71><loc_53></location>GLYPH<SM590000> Help adhere to external AI regulations for audit and compliance.</paragraph>
<paragraph><location><page_17><loc_22><loc_50><loc_76><loc_51></location>GLYPH<SM590000> Convert external AI regulations into policies for automatic enforcement.</paragraph>
<paragraph><location><page_17><loc_22><loc_48><loc_82><loc_49></location>GLYPH<SM590000> Use dynamic dashboards for compliance status across policies and regulations.</paragraph>
<paragraph><location><page_17><loc_22><loc_40><loc_89><loc_46></location>Enterprises can develop AI models and deploy them by using IBM Watson Studio or WML on CP4D on Red Hat OpenShift on a virtual machine that is based on IBM z/VM or Red Hat Enterprise Linux KVM on IBM Z. AI governance on IBM LinuxONE is supported in the following two ways:</paragraph>
<paragraph><location><page_17><loc_22><loc_37><loc_86><loc_40></location>GLYPH<SM590000> Monitor the AI models with Watson OpenScale on CP4D on Red Hat OpenShift on a virtual machine on IBM Z.</paragraph>
<paragraph><location><page_17><loc_22><loc_37><loc_86><loc_39></location>GLYPH<SM590000> Monitor the AI models with Watson OpenScale on CP4D on Red Hat OpenShift on a virtual machine on IBM Z.</paragraph>
<paragraph><location><page_17><loc_22><loc_29><loc_89><loc_36></location>GLYPH<SM590000> Enterprises can develop AI models by creating and training models by using Watson Studio and development tools such as Jupyter Notebook or JupyterLab, and then deploying the model onto WML on CP4D on Red Hat OpenShift on a virtual machine on IBM Z. Then, these enterprises can achieve end-to-end AI governance by running AI Factsheets, IBM Watson OpenScale, and IBM Watson OpenPages® on CP4D on x86.</paragraph>
<paragraph><location><page_17><loc_22><loc_26><loc_84><loc_27></location>Figure 9 on page 16 shows the end-to-end flow for a remote AI governance solution.</paragraph>
<caption><location><page_18><loc_11><loc_62><loc_48><loc_63></location>Figure 9 Remote AI governance solution end-to-end flow</caption>
<figure>
<location><page_18><loc_10><loc_63><loc_90><loc_90></location>
<location><page_18><loc_10><loc_63><loc_89><loc_90></location>
<caption>Figure 9 Remote AI governance solution end-to-end flow</caption>
</figure>
<paragraph><location><page_18><loc_22><loc_59><loc_72><loc_60></location>To achieve end-to-end AI governance, complete the following steps:</paragraph>
<paragraph><location><page_18><loc_22><loc_55><loc_89><loc_58></location>1. Create a model entry in IBM OpenPages by using CP4D on an x86 platform, as shown in Figure 10.</paragraph>
<caption><location><page_18><loc_10><loc_14><loc_46><loc_16></location>Figure 10 Creating a model entry in IBM OpenPages</caption>
<caption><location><page_18><loc_11><loc_14><loc_46><loc_15></location>Figure 10 Creating a model entry in IBM OpenPages</caption>
<figure>
<location><page_18><loc_10><loc_16><loc_89><loc_53></location>
<location><page_18><loc_10><loc_16><loc_90><loc_54></location>
<caption>Figure 10 Creating a model entry in IBM OpenPages</caption>
</figure>
<paragraph><location><page_19><loc_22><loc_87><loc_89><loc_91></location>2. Train a model by using Watson Studio and by using development tools such as Jupyter Notebook or JupyterLab on CP4D on Red Hat OpenShift on a virtual machine on IBM Z, as shown in Figure 11.</paragraph>
@ -211,93 +231,93 @@
<caption>Figure 13 External model</caption>
</figure>
<paragraph><location><page_20><loc_25><loc_45><loc_89><loc_48></location>You can track the model through each stage of the model lifecycle, as shown in Figure 14, by using AI Factsheets and OpenPages.</paragraph>
<caption><location><page_20><loc_11><loc_9><loc_31><loc_10></location>Figure 14 Tracking the model</caption>
<caption><location><page_20><loc_11><loc_9><loc_30><loc_10></location>Figure 14 Tracking the model</caption>
<figure>
<location><page_20><loc_10><loc_11><loc_90><loc_44></location>
<caption>Figure 14 Tracking the model</caption>
</figure>
<paragraph><location><page_21><loc_25><loc_88><loc_89><loc_91></location>You can see that the model facts are tracked and synchronized to IBM OpenPages for risk management, as shown in Figure 15.</paragraph>
<caption><location><page_21><loc_10><loc_46><loc_74><loc_48></location>Figure 15 Model facts that are tracked and synchronized to IBM OpenPages on an x86 platform</caption>
<caption><location><page_21><loc_11><loc_47><loc_73><loc_48></location>Figure 15 Model facts that are tracked and synchronized to IBM OpenPages on an x86 platform</caption>
<figure>
<location><page_21><loc_10><loc_48><loc_89><loc_86></location>
<location><page_21><loc_10><loc_48><loc_90><loc_87></location>
<caption>Figure 15 Model facts that are tracked and synchronized to IBM OpenPages on an x86 platform</caption>
</figure>
<paragraph><location><page_22><loc_22><loc_88><loc_86><loc_91></location>5. Create an external model by using IBM OpenScale on the x86 platform, as shown in Figure 16.</paragraph>
<caption><location><page_22><loc_11><loc_50><loc_48><loc_52></location>Figure 16 Creating an external model on an x86 platform</caption>
<caption><location><page_22><loc_11><loc_50><loc_48><loc_51></location>Figure 16 Creating an external model on an x86 platform</caption>
<figure>
<location><page_22><loc_10><loc_52><loc_90><loc_86></location>
<caption>Figure 16 Creating an external model on an x86 platform</caption>
</figure>
<paragraph><location><page_22><loc_22><loc_43><loc_89><loc_49></location>IBM OpenScale provides a comprehensive dashboard that tracks fairness, quality monitoring, drift, and explainability of a model. Fairness determines whether your model produces biased outcomes. Quality determines how well your model predicts outcomes. Drift is the degradation of predictive performance over time. A sample is shown in Figure 17 on page 21.</paragraph>
<paragraph><location><page_22><loc_22><loc_43><loc_90><loc_49></location>IBM OpenScale provides a comprehensive dashboard that tracks fairness, quality monitoring, drift, and explainability of a model. Fairness determines whether your model produces biased outcomes. Quality determines how well your model predicts outcomes. Drift is the degradation of predictive performance over time. A sample is shown in Figure 17 on page 21.</paragraph>
<caption><location><page_23><loc_11><loc_54><loc_63><loc_55></location>Figure 17 IBM OpenScale dashboard that is used to monitor the external model</caption>
<figure>
<location><page_23><loc_10><loc_56><loc_90><loc_90></location>
<caption>Figure 17 IBM OpenScale dashboard that is used to monitor the external model</caption>
</figure>
<paragraph><location><page_23><loc_22><loc_45><loc_89><loc_53></location>You developed and deployed the AI model by using Watson Studio and WML on CP4D on Red Hat OpenShift on a virtual machine on IBM Z, and achieved end-to-end AI model governance by leveraging AI Factsheets, OpenScale, and OpenPages on CP4D on an x86 platform. Figure 18 shows end-to-end AI governance when using IBM OpenPages, AI Factsheets, and OpenScale.</paragraph>
<caption><location><page_23><loc_11><loc_7><loc_83><loc_8></location>Figure 18 Final result: End-to-end AI governance when using IBM OpenPages, AI Factsheets, and OpenScale</caption>
<caption><location><page_23><loc_10><loc_7><loc_83><loc_8></location>Figure 18 Final result: End-to-end AI governance when using IBM OpenPages, AI Factsheets, and OpenScale</caption>
<figure>
<location><page_23><loc_10><loc_9><loc_89><loc_44></location>
<location><page_23><loc_10><loc_9><loc_90><loc_44></location>
<caption>Figure 18 Final result: End-to-end AI governance when using IBM OpenPages, AI Factsheets, and OpenScale</caption>
</figure>
<subtitle-level-1><location><page_24><loc_11><loc_89><loc_64><loc_91></location>Use case 2: Credit default risk assessment</subtitle-level-1>
<subtitle-level-1><location><page_24><loc_10><loc_89><loc_64><loc_91></location>Use case 2: Credit default risk assessment</subtitle-level-1>
<paragraph><location><page_24><loc_22><loc_83><loc_89><loc_87></location>In today's world, many individuals or businesses seeking loans to meet their growing business needs often look to financial institutions. Financial institutions can offer loans to individuals or businesses and charge interest based on the current market situations.</paragraph>
<subtitle-level-1><location><page_24><loc_11><loc_79><loc_31><loc_80></location>Industry challenges</subtitle-level-1>
<paragraph><location><page_24><loc_22><loc_71><loc_89><loc_77></location>Financial institutions must make an accurate decision about whether to sanction a loan or not, and judging the likelihood of default is the difference between a successful and unsuccessful loan portfolio. In a traditional scenario, an experienced banker can judge someone's likelihood of default, but that is not an efficient method for judgment as a business grows.</paragraph>
<subtitle-level-1><location><page_24><loc_10><loc_67><loc_56><loc_69></location>Predictions of credit default risk assessment</subtitle-level-1>
<paragraph><location><page_24><loc_22><loc_55><loc_89><loc_65></location>In the modern world, growing business institutions can no longer rely on only experienced bankers to decide whether to sanction a loan knowing that there is a probability that the borrower might default on their loans. A better choice is to rely on technological advancements that can help with reasoning based on facts, such as leveraging credit risk modeling techniques to process the historical data of past borrowers to understand their credit behavior and make a more informed decision about whether to lend money, how much to lend, and the tenure for closing the loan.</paragraph>
<paragraph><location><page_24><loc_22><loc_49><loc_89><loc_53></location>Financial institutions can leverage AI solutions by using ML techniques to predict the credit risk. Applying AI to credit risk modeling techniques can benefit institutions in decision-making, and thus can help better manage the exposure to credit risk.</paragraph>
<paragraph><location><page_24><loc_22><loc_42><loc_89><loc_48></location>Figure 19 on page 23 shows a sample architecture about how to design and develop an AI model for credit risk assessment on IBM Z. An IBM WebSpherefi Application Server is used for handling in-bound transactions, and CP4D is used for AI model lifecycle management that includes building, training, and deploying the model.</paragraph>
<caption><location><page_25><loc_11><loc_55><loc_64><loc_57></location>Figure 19 Architecture for credit risk prediction by using an ML AI model on IBM Z</caption>
<figure>
<location><page_25><loc_10><loc_57><loc_89><loc_90></location>
<caption>Figure 19 Architecture for credit risk prediction by using an ML AI model on IBM Z</caption>
</figure>
<paragraph><location><page_25><loc_22><loc_48><loc_89><loc_54></location>A data scientist can leverage Watson Studio to develop and train an AI model and WML to deploy and score the model. In this sample architecture, the WML Python runtime leverages the ML framework IBM Snap Machine Learning (Snap ML) for scoring, and can leverage an integrated AI accelerator at the time of model import.</paragraph>
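To make the Snap ML scoring step concrete, here is a minimal, hypothetical sketch of training and scoring a credit-risk classifier with Snap ML's scikit-learn-style API. The synthetic data, feature count, and hyperparameters are illustrative assumptions, not the actual code behind this architecture.

```python
import numpy as np
from snapml import RandomForestClassifier

# Synthetic stand-in for historical borrower data: 8 numeric features per
# applicant and a binary default label (illustrative only).
rng = np.random.default_rng(seed=7)
X = rng.normal(size=(1000, 8)).astype(np.float32)
y = (X[:, 0] + rng.normal(size=1000) > 0).astype(np.float32)

# n_jobs controls CPU thread parallelism during training and scoring.
clf = RandomForestClassifier(n_estimators=100, n_jobs=4)
clf.fit(X, y)

# Probability of default for new applicants informs the approval decision.
default_risk = clf.predict_proba(X[:5])[:, 1]
print(default_risk)
```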
<paragraph><location><page_25><loc_22><loc_39><loc_89><loc_46></location>Then, the banking loan approval team can send a loan applicant request to the IBM WebSphere Application Server, which can make a request to the AI inference endpoint. The AI inference engine scores the transaction and sends the result back to the loan approval team. Based on the results, the approval team can decide on whether to approve a loan or not, and also decide how much they can lend, timelines, and other factors.</paragraph>
<paragraph><location><page_25><loc_22><loc_33><loc_86><loc_38></location>The transaction system that is shown in Figure 19 uses IBM WebSphere Liberty as an application server, but you also can use an IBM Open Libertyfi application server or any application server that can send RESTful API communications.</paragraph>
<paragraph><location><page_25><loc_22><loc_23><loc_89><loc_32></location>Models are frequently developed and tested in many platforms and languages, such as Python, Scala, R, and Go. Models can leverage ML frameworks like scikit-learn, Snap ML, or XGBoost, or DL frameworks like TensorFlow or PyTorch. Training a model can be done on any platform if you have enough computing power for complex models, but moving that model into production requires careful testing to ensure that transactions are not delayed, especially if you plan to run the model within a transaction.</paragraph>
<paragraph><location><page_25><loc_22><loc_19><loc_89><loc_21></location>We showed how IBM Z enables customers to use AI frameworks to detect credit risk. Now, we look at how you can leverage CP4D and TensorFlow on IBM Z to detect credit risk.</paragraph>
<paragraph><location><page_26><loc_22><loc_90><loc_80><loc_91></location>Figure 20 shows an architecture for predicting credit risk by using DL on IBM Z.</paragraph>
<caption><location><page_26><loc_11><loc_53><loc_56><loc_54></location>Figure 20 Architecture for credit risk prediction by using DL on IBM Z</caption>
<figure>
<location><page_26><loc_10><loc_55><loc_90><loc_88></location>
<caption>Figure 20 Architecture for credit risk prediction by using DL on IBM Z</caption>
</figure>
<paragraph><location><page_26><loc_22><loc_46><loc_87><loc_52></location>Data scientists can start creating and training a DL AI model by using a Jupyter Notebook instance and Watson Studio. Then, they can deploy the model by using WML on CP4D running on IBM Z, which provides an endpoint. Other applications, including the IBM WebSphere server, can produce credit risk results by using the model's endpoint.</paragraph>
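As a hedged sketch of how an application can consume the model's endpoint, the following Python snippet posts a scoring request in the WML v4 "input_data" payload convention. The host, deployment ID, token, and feature names are placeholders for illustration.

```python
import requests

# Placeholders: host, deployment ID, and token come from your CP4D instance.
token = "<bearer-token>"
scoring_url = (
    "https://<cpd-host>/ml/v4/deployments/<deployment-id>"
    "/predictions?version=2022-04-01"  # version date is a required query param
)

# WML v4 scoring payloads use the "input_data" fields/values convention.
payload = {
    "input_data": [
        {
            "fields": ["age", "income", "loan_amount", "credit_history_len"],
            "values": [[42, 58000.0, 12000.0, 11]],
        }
    ]
}

response = requests.post(
    scoring_url,
    json=payload,
    headers={"Authorization": f"Bearer {token}"},
)
print(response.json())  # predictions and probabilities for each input row
```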
<paragraph><location><page_26><loc_22><loc_42><loc_89><loc_44></location>In summary, here are some considerations for developing real-time AI models, such as credit risk assessment:</paragraph>
<paragraph><location><page_26><loc_22><loc_39><loc_86><loc_41></location>GLYPH<SM590000> A preference for in-platform run times of the model, such as faster execution results.</paragraph>
<paragraph><location><page_26><loc_22><loc_37><loc_73><loc_38></location>GLYPH<SM590000> Less overhead in the end-to-end flows might improve scoring time.</paragraph>
<paragraph><location><page_26><loc_22><loc_34><loc_89><loc_36></location>GLYPH<SM590000> If you are using models that are not deployable, CP4D offers a custom Python run time to build your own stack if they are not available on the platform.</paragraph>
<paragraph><location><page_26><loc_22><loc_30><loc_89><loc_33></location>GLYPH<SM590000> AI inferencing that is based on ML or DL models can increase the accuracy of credit risk assessment.</paragraph>
<paragraph><location><page_26><loc_22><loc_25><loc_87><loc_29></location>GLYPH<SM590000> Using IBM z16 and on-chip AI acceleration with the Telum chip that is embedded with regular Integrated Facility for Linux (IFLs) provides an execution speed for your transactions that cannot be achieved by other means.</paragraph>
<subtitle-level-1><location><page_27><loc_10><loc_89><loc_55><loc_91></location>Use case 3: Clearing and settlement</subtitle-level-1>
<paragraph><location><page_27><loc_22><loc_80><loc_88><loc_87></location>Clearing and settlements involve banks or financial institutions sending and receiving wire transfers by using secure interbank payments networks that can clear or settle numerous transactions. When an individual or business entity initiates a wire transfer, clearing begins the fund delivery process. Banks can begin the settlement phase either immediately after clearing takes place or later, mostly at the end of the business day.</paragraph>
<subtitle-level-1><location><page_27><loc_10><loc_76><loc_29><loc_77></location>Industry challenge</subtitle-level-1>
<paragraph><location><page_27><loc_22><loc_71><loc_88><loc_74></location>Banks and financial institutions must deal with high-risk transactions that can lead to loss. Moreover, these transactions can lead to regulatory violations and extra compliance costs.</paragraph>
<subtitle-level-1><location><page_27><loc_11><loc_67><loc_43><loc_69></location>Clearing and settlement solution</subtitle-level-1>
<paragraph><location><page_27><loc_22><loc_60><loc_89><loc_65></location>Use AI to predict which trades or transactions have high risk exposures, and propose solutions for a more efficient settlement process. The expedited remediation of questionable transactions can prevent costly consequences, regulatory violations, and negative business impacts.</paragraph>
<paragraph><location><page_27><loc_22><loc_49><loc_89><loc_58></location>In financial institutions, finding which financial transactions are legitimate and which transactions are fraudulent is of paramount importance. In this section, we go through a use case where we use AI to predict which trades or transactions have high risk exposures, and propose solutions for a more efficient settlement process. The expedited remediation of questionable transactions can prevent costly consequences, regulatory violations, and negative business impacts to financial institutions.</paragraph>
<paragraph><location><page_27><loc_22><loc_40><loc_89><loc_48></location>The goal is to predict in real time whether the transaction being processed might be a fraudulent transaction or not. To achieve this goal, we build an ML model that can do this prediction for the financial institution. Because there would be many transactions being processed at any point by the financial institution, it is important to perform this prediction of fraudulent transactions in near-real time in a few milliseconds.</paragraph>
<paragraph><location><page_27><loc_22><loc_33><loc_89><loc_39></location>One possible solution is to build and train a TensorFlow based DL model that learns from the historical data and predicts the fraudulent transactions. CP4D on IBM Z and IBM LinuxONE is a suitable product with which this task can be achieved, the model deployed, and a serving endpoint provided.</paragraph>
<paragraph><location><page_28><loc_22><loc_88><loc_88><loc_91></location>Figure 21 provides a high-level diagram of a clearing and settlement use case for financial transactions that uses CP4D on IBM Z and IBM LinuxONE.</paragraph>
<caption><location><page_28><loc_10><loc_59><loc_75><loc_60></location>Figure 21 Clearing and settlement use case for financial transactions by using Cloud Pak for Data</caption>
<figure>
<location><page_28><loc_10><loc_61><loc_89><loc_87></location>
<caption>Figure 21 Clearing and settlement use case for financial transactions by using Cloud Pak for Data</caption>
</figure>
<paragraph><location><page_28><loc_22><loc_56><loc_58><loc_57></location>Here are the steps of the high-level process flow:</paragraph>
<paragraph><location><page_28><loc_22><loc_53><loc_86><loc_55></location>1. Create a connection to a database (for example, an IBM Db2fi database) where the historical data will be used for ML model building.</paragraph>
<paragraph><location><page_28><loc_22><loc_49><loc_89><loc_52></location>2. Read the data from the database and prepare the data for AI by using the Data Refinery tool in CP4D.</paragraph>
<paragraph><location><page_28><loc_22><loc_44><loc_89><loc_48></location>3. A Jupyter Notebook or JupyterLab IDE that is provided by the Watson Studio component in CP4D helps us build and train the AI model. The trained model can be saved into a WML repository.</paragraph>
<paragraph><location><page_28><loc_22><loc_42><loc_77><loc_43></location>4. Deploy the saved model into a deployment space for batch deployment.</paragraph>
<paragraph><location><page_28><loc_22><loc_39><loc_68><loc_41></location>5. Create a batch deployment by using any of these interfaces:</paragraph>
<paragraph><location><page_28><loc_25><loc_37><loc_75><loc_38></location>a. Watson Studio user interface from an Analytics deployment space.</paragraph>
<paragraph><location><page_28><loc_25><loc_35><loc_41><loc_36></location>b. WML Python client.</paragraph>
<paragraph><location><page_28><loc_25><loc_33><loc_40><loc_34></location>c. WML REST APIs.</paragraph>
<paragraph><location><page_28><loc_22><loc_31><loc_68><loc_32></location>6. A hardware configuration can be chosen for the deployment.</paragraph>
<paragraph><location><page_28><loc_22><loc_27><loc_89><loc_30></location>7. A batch deployment processes input data from a file, data connection, or connected data in a storage bucket, and writes the output to a selected destination.</paragraph>
<paragraph><location><page_28><loc_22><loc_24><loc_83><loc_26></location>8. One way to run batch deployment to predict or score is to create and run a batch deployment job.</paragraph>
<paragraph><location><page_28><loc_22><loc_21><loc_44><loc_23></location>9. Provide an input data type:</paragraph>
<paragraph><location><page_28><loc_25><loc_19><loc_61><loc_20></location>a. Inline data for entering a JSON format payload.</paragraph>
<paragraph><location><page_28><loc_25><loc_17><loc_80><loc_18></location>b. Select Data asset , click Select data source , and then specify your asset.</paragraph>
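As a rough illustration of steps 4 through 9, the following hedged sketch uses the ibm_watson_machine_learning Python client (interface b in step 5). The host, token, space and model IDs, hardware size, and field names are placeholders, and the exact metadata property names can vary across client versions.

```python
from ibm_watson_machine_learning import APIClient

# Placeholder credentials for a CP4D cluster; adjust to your environment.
client = APIClient(
    {
        "url": "https://<cpd-host>",
        "token": "<bearer-token>",
        "instance_id": "openshift",
        "version": "4.5",
    }
)
client.set.default_space("<deployment-space-id>")

# Steps 5-6: create a batch deployment with a chosen hardware configuration.
deployment = client.deployments.create(
    "<model-id>",
    meta_props={
        client.deployments.ConfigurationMetaNames.NAME: "fraud-batch-deploy",
        client.deployments.ConfigurationMetaNames.BATCH: {},
        client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"name": "S"},
    },
)
deployment_id = client.deployments.get_id(deployment)

# Steps 8-9a: run a batch deployment job with inline JSON data as the input type.
job = client.deployments.create_job(
    deployment_id,
    meta_props={
        client.deployments.ScoringMetaNames.INPUT_DATA: [
            {"fields": ["amount", "merchant_id", "hour"], "values": [[250.0, 381, 23]]}
        ]
    },
)
print(job["metadata"]["id"])  # job ID to poll for results
```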
@ -309,19 +329,19 @@
<paragraph><location><page_29><loc_22><loc_81><loc_61><loc_83></location>GLYPH<SM590000> No Impact to SLAs and the batch process window.</paragraph>
<paragraph><location><page_29><loc_22><loc_79><loc_83><loc_80></location>GLYPH<SM590000> Proactively stop losses, and lower operational, regulatory, and compliance costs.</paragraph>
<paragraph><location><page_29><loc_22><loc_76><loc_87><loc_78></location>GLYPH<SM590000> The solution is using a DL framework like TensorFlow for high-performing, low latency scoring.</paragraph>
<subtitle-level-1><location><page_29><loc_10><loc_70><loc_79><loc_72></location>Use case 4: Remaining Useful Life of an aircraft engine</subtitle-level-1>
<paragraph><location><page_29><loc_22><loc_65><loc_89><loc_68></location>In this use case, we describe how an airline can deploy an AI model for inferencing by using IBMfi zSystems.</paragraph>
<paragraph><location><page_29><loc_22><loc_58><loc_89><loc_64></location>Remaining Useful Life (RUL) is the remaining time or cycles that an aircraft engine is likely to operate without any failure. In this case, it is the equivalent of the number of flights remaining for the engine after the last flight. By estimating RUL, the operator can decide on the next maintenance schedule and avoid unplanned downtime.</paragraph>
<paragraph><location><page_29><loc_22><loc_54><loc_86><loc_56></location>Figure 22 provides an overview of the inferencing architecture for the RUL of an aircraft engine when using IBM Z.</paragraph>
<caption><location><page_29><loc_11><loc_20><loc_40><loc_21></location>Figure 22 Inferencing architecture on IBM Z</caption>
<figure>
<location><page_29><loc_10><loc_22><loc_88><loc_52></location>
<caption>Figure 22 Inferencing architecture on IBM Z</caption>
</figure>
<paragraph><location><page_29><loc_22><loc_8><loc_89><loc_19></location>Because we are looking into data-driven model development, the data set of our target is the run-to-failure data of the engine. We are looking into a supervised learning problem, and we use regression techniques to learn from the data. DL techniques such as Long Short-Term Memory (LSTM) or Gated Recurrent Units (GRU) are our choice because we are looking into a time series data set. TensorFlow or PyTorch frameworks are leveraged to create models. AI governance monitors the data and model drift to maintain the model quality throughout the model's life.</paragraph>
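The following minimal sketch shows what such an LSTM regression model can look like in TensorFlow/Keras, assuming the run-to-failure data has been windowed into fixed-length sequences of sensor readings with the RUL at the end of each window as the target. The shapes and synthetic data are illustrative, not the actual model described here.

```python
import numpy as np
import tensorflow as tf

window, n_sensors = 30, 14
X = np.random.rand(512, window, n_sensors).astype("float32")  # synthetic stand-in
y = np.random.rand(512, 1).astype("float32")                  # normalized RUL targets

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=(window, n_sensors)),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(1),  # regression head: predicted remaining cycles
])
model.compile(optimizer="adam", loss="mse")
model.fit(X, y, epochs=2, batch_size=64)
```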
<paragraph><location><page_30><loc_22><loc_78><loc_90><loc_91></location>Open-source data from NASA was used to build the AI model, which then was deployed on CP4D. CP4D enables the data scientist's journey from modeling to deployment in a seamless process. Data engineers leverage Db2 to host the data set, which includes the training, testing, and validation data sets. Because the data is hosted on Db2, you can expect low latency while retrieving the data, and data security needs are served because Db2 is hosted on the IBM Z platform. Data is fetched by the data refinery to do the necessary pre-processing and data imputations. You can use the programming languages Golang or C++ for real-time predictions, depending on customer needs. For more information about this topic, see "Use case 3: Clearing and settlement" on page 25.</paragraph>
<paragraph><location><page_30><loc_22><loc_70><loc_89><loc_76></location>Model building is done on Watson Studio, leveraging the high-performance computing hardware on IBM Z. You can train the model anywhere (on your own hardware or the cloud) and bring the model directly into CP4D, which provides data scientists with the flexibility of implementation choices.</paragraph>
<paragraph><location><page_30><loc_22><loc_65><loc_89><loc_69></location>We used LSTM to build the AI model and used the training data. The model was continuously evaluated to model convergence. The final model is tested with the test data, which is never exposed at the time of training to make sure that the model works.</paragraph>
<paragraph><location><page_30><loc_22><loc_57><loc_89><loc_63></location>This model is deployed on WML on CP4D and runs on IBM Z. If required, the trained model can be converted to the Open Neural Network Exchange (ONNX) format before deployment. Based on project requirements, IBM Z supports high-throughput, low latency inference requirements by leveraging an AI accelerator.</paragraph>
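A hedged sketch of the optional ONNX conversion step follows, using the tf2onnx package on a small stand-in for the trained Keras model above. The model architecture, shapes, and output path are illustrative assumptions.

```python
import tensorflow as tf
import tf2onnx

# Small stand-in for the trained RUL model from the previous sketch.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=(30, 14)),
    tf.keras.layers.Dense(1),
])

# Fixing the input signature gives the exported graph a stable shape and name.
spec = (tf.TensorSpec((None, 30, 14), tf.float32, name="sensor_window"),)
tf2onnx.convert.from_keras(model, input_signature=spec, output_path="rul_lstm.onnx")
```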
<paragraph><location><page_30><loc_22><loc_47><loc_89><loc_56></location>For decision-making about an aircraft engine's life, it is important to be able to explain the model predictions from end to end. This explainability may be global or local. Global explainability enables decision-makers to evaluate the trained model in general from the subject matter expert (SME) point of view. Local explainability enables the operator to validate the reasons behind the present inference and relate it to the past data points, which are an indicative cause of the prediction.</paragraph>
<paragraph><location><page_30><loc_22><loc_40><loc_89><loc_45></location>The AI governance components such as IBM OpenScale on CP4D support explainability and manages the drifts in data and concept. OpenPages and AI FactSheet together can alert the stakeholders about important events through a dashboard and allow course correction at any point.</paragraph>
@ -329,7 +349,7 @@
<paragraph><location><page_30><loc_22><loc_28><loc_85><loc_31></location>Figure 23 on page 29 provides a more in-depth view of the architecture of an AI-based predictive maintenance application.</paragraph>
<caption><location><page_31><loc_11><loc_43><loc_35><loc_44></location>Figure 23 In-depth architectural view</caption>
<figure>
<location><page_31><loc_10><loc_45><loc_89><loc_90></location>
<caption>Figure 23 In-depth architectural view</caption>
</figure>
<paragraph><location><page_31><loc_22><loc_39><loc_82><loc_41></location>In summary, consider the following points while developing an AI-based predictive maintenance application:</paragraph>
@ -338,45 +358,44 @@
<paragraph><location><page_31><loc_22><loc_28><loc_87><loc_30></location>GLYPH<SM590000> IBM Z provides high data security and low latency requirements at scale for the critical applications.</paragraph>
<paragraph><location><page_31><loc_22><loc_24><loc_89><loc_27></location>GLYPH<SM590000> A data scientist can choose to train the model and deploy it on CP4D seamlessly with the latest tech stack that is available.</paragraph>
<paragraph><location><page_31><loc_22><loc_20><loc_82><loc_23></location>GLYPH<SM590000> The AIOps and MLOps that are supported by CP4D track the AI model and data lifecycle throughout the application lifecycle.</paragraph>
<subtitle-level-1><location><page_32><loc_10><loc_87><loc_89><loc_91></location>Use case 5: AI-powered video analytics on an infant's motions for health prediction</subtitle-level-1>
<paragraph><location><page_32><loc_22><loc_77><loc_89><loc_85></location>Each year, approximately 5 million newborns worldwide are suffering from a neuro-developmental disorder. Due to the lack of early diagnoses and intervention, many infants are disabled and abandoned, especially in countries with limited numbers of pediatricians with extensive experience in neuro-developmental disorders. This situation is a conundrum that plagues many families around the world.</paragraph>
<paragraph><location><page_32><loc_22><loc_70><loc_89><loc_76></location>Infant motion analysis is critically important to understanding healthy childhood development. In infants, monitoring their poses provides information about their health that can lead to better early developmental risk assessment and diagnosis.</paragraph>
<paragraph><location><page_32><loc_22><loc_64><loc_87><loc_68></location>Adults use different techniques and methods to express their feelings (like sick, happy, stressed, or hungry), but this case is usually different for infants who cannot express their feelings. Based on the baby movements, AI can predict their expression or health.</paragraph>
<paragraph><location><page_32><loc_22><loc_54><loc_87><loc_63></location>In this use case, we examine how AI-powered video analytics can assist new parents and hospitals by addressing pose-based real-time body movements of the infants (such as arching back, head banging, kicking legs, rubbing eyes, stretching, and sucking fingers). During the initial months of a baby's life, spontaneous movements might indicate later developmental disorders, such as cerebral palsy, Rett syndrome, and autism spectrum disorders.</paragraph>
<subtitle-level-1><location><page_32><loc_10><loc_50><loc_31><loc_51></location>Industry challenges</subtitle-level-1>
<paragraph><location><page_32><loc_22><loc_42><loc_89><loc_48></location>There are video surveillance systems that are installed for monitoring an infant's movement in many hospitals or homes so that any problem can be witnessed and potentially even stopped before it takes place. These systems require much manual work to monitor the real-time video streams and intervene when a problem is detected.</paragraph>
<paragraph><location><page_32><loc_22><loc_33><loc_89><loc_41></location>There is a certain amount of trust that you must place on the person who monitors a surveillance system to ensure that the job is being done effectively and efficiently, and that the surveillance system is being vigilantly watched. Because of the dependency on these manual efforts, you need something "smart" that constantly monitors the surveillance system and detects problems effectively.</paragraph>
<paragraph><location><page_32><loc_22><loc_28><loc_89><loc_32></location>AI is shaping the controls of surveillance by mapping and tracking occurrences with self-learning abilities. AI can improve on human operations and analyze video footage in real time to alert hospitals or parents if any anomalies are identified.</paragraph>
<paragraph><location><page_32><loc_22><loc_23><loc_89><loc_26></location>Processing streams of video data from surveillance systems and then performing advanced analytics and detecting anomalies quickly is a significant challenge in the industry.</paragraph>
<subtitle-level-1><location><page_32><loc_10><loc_19><loc_45><loc_21></location>Infant motion analytics in real time</subtitle-level-1>
<paragraph><location><page_32><loc_22><loc_9><loc_89><loc_17></location>AI is the current "market trend evolution" in video analytics and advancing the decision-making capabilities of the human mind. DL-based computer vision AI techniques are being widely adopted by various industries to solve real-time problems. These techniques improve the detection and prediction accuracy without increasing the hardware cost exponentially. For users, AI greatly reduces the workload of the monitoring staff and provides benefits by detecting unusual incidents and solving many video forensic problems.</paragraph>
<paragraph><location><page_33><loc_11><loc_78><loc_12><loc_79></location>S</paragraph>
<paragraph><location><page_33><loc_22><loc_87><loc_88><loc_91></location>CP4D was used to build and deploy the AI-powered video analytics on infant's motion for health prediction use case on IBM Z. IBM Z with AI accelerator enables faster inference for detecting face and body movements and performing angle analytics in real time.</paragraph>
<paragraph><location><page_33><loc_22><loc_79><loc_89><loc_85></location>Figure 24 shows an architectural diagram about how to design and develop an AI model for real-time body pose detection on IBM Z. A deep convolutional neural network architecture was trained on the task of infant pose estimation on the custom data set by leveraging IBM Cloud Pak for Data.</paragraph>
<caption><location><page_33><loc_11><loc_47><loc_46><loc_48></location>Figure 24 Architecture for AI-powered video analytics</caption>
<figure>
<location><page_33><loc_10><loc_48><loc_89><loc_79></location>
<caption>Figure 24 Architecture for AI-powered video analytics</caption>
</figure>
<paragraph><location><page_33><loc_22><loc_35><loc_89><loc_45></location>Live camera feeds or recorded videos of an infant's movement are the inputs for a pose detection model. This video streaming data was stored in IBM Cloudfi Object Storage for image processing. Video data must be transformed into frames so that the infant's body poses can be detected. These pose-estimation components of the pipeline predict the location of all 17 person key points with 3 degrees of freedom each (x, y location and visibility) plus two virtual alignment key points. This approach also embraces a compute-intensive heat map prediction of infant body posture.</paragraph>
<paragraph><location><page_33><loc_22><loc_24><loc_88><loc_33></location>When changes in body posture or movement happen, analytics can be performed, and a threshold can be set for the angle of the body and posture movements. An analysis can be performed on movement that is based on that threshold to help to predict an infant's health index in the output video stream by leveraging the IBM z16 on-chip AI acceleration, which provides an execution speed in real time on an edge device, which cannot be achieved by other means.</paragraph>
<paragraph><location><page_33><loc_22><loc_22><loc_72><loc_23></location>We can leverage the following AI technology stack for this use case:</paragraph>
<paragraph><location><page_33><loc_22><loc_18><loc_89><loc_21></location>GLYPH<SM590000> Convolutional neural network: Build an artificial neural network model on video streaming and images.</paragraph>
<paragraph><location><page_33><loc_22><loc_16><loc_74><loc_17></location>GLYPH<SM590000> TensorFlow: A DL back-end framework that is based on TensorFlow.</paragraph>
<paragraph><location><page_33><loc_22><loc_12><loc_89><loc_15></location>GLYPH<SM590000> Mediapipe: A library that helps with video streaming processing and prediction of human pose estimation.</paragraph>
<paragraph><location><page_33><loc_22><loc_10><loc_84><loc_11></location>GLYPH<SM590000> OpenCV: A real-time computer vision library that helps perform image processing.</paragraph>
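The following short sketch illustrates how the OpenCV and Mediapipe pieces of this stack fit together: OpenCV reads frames from a recorded video or live camera feed, and MediaPipe's generic pose estimator produces the (x, y, visibility) keypoints described above. The infant-specific model in this use case is custom, so MediaPipe's stock estimator and the file name stand in here as assumptions.

```python
import cv2
import mediapipe as mp

pose = mp.solutions.pose.Pose(min_detection_confidence=0.5)
cap = cv2.VideoCapture("infant.mp4")  # or 0 for a live camera feed

while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    # MediaPipe expects RGB input; OpenCV delivers BGR frames.
    results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if results.pose_landmarks:
        for lm in results.pose_landmarks.landmark:
            # x, y are normalized image coordinates; visibility acts as a
            # confidence score, matching the keypoint triplets described above.
            print(lm.x, lm.y, lm.visibility)

cap.release()
pose.close()
```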
<paragraph><location><page_34><loc_22><loc_87><loc_89><loc_91></location>WML was used for deployment of the pose detection model and generated notifications to users with web and mobile applications, and it integrates with Fitbit for push notifications so that hospitals and parents can take preventive actions.</paragraph>
<subtitle-level-1><location><page_34><loc_11><loc_81><loc_37><loc_83></location>Additional resources</subtitle-level-1>
<paragraph><location><page_34><loc_22><loc_76><loc_89><loc_79></location>GLYPH<SM590000> The Cloud Pak for Data 4.5 on IBM Z Overview Demo video provides an overview of some of the more important features of CP4D on IBM Z.</paragraph>
<paragraph><location><page_34><loc_22><loc_74><loc_49><loc_75></location>GLYPH<SM590000> IBM Cloud Pak for Data Tutorials.</paragraph>
<paragraph><location><page_34><loc_22><loc_71><loc_85><loc_73></location>GLYPH<SM590000> Here are some additional use cases that use the data science frameworks that are available as part of CP4D on IBM Z and IBM LinuxONE:</paragraph>
<paragraph><location><page_34><loc_25><loc_67><loc_86><loc_70></location>-Payment Card Fraud Detection by using TensorFlow on CP4D on IBM Z and IBM LinuxONE is a payment card fraud detection use case.</paragraph>
<paragraph><location><page_34><loc_25><loc_63><loc_88><loc_66></location>-Fashion-MNIST clothing classification with PyTorch on Cloud Pak for Data on IBM Z and IBM LinuxONE is a Fashion-MNIST clothing classification use case.</paragraph>
<paragraph><location><page_34><loc_25><loc_57><loc_90><loc_62></location>-Payment Card Fraud Prevention by using Snap ML on IBM Cloud Pak for Data on Red Hat OpenShift on a virtual machine on IBM Z and IBM LinuxONE, which leverage the z16 integrated AI accelerator describes a use case that uses Snap Machine Learning in Cloud Pak for Data on IBM Z and IBM LinuxONE. It is a Snap ML use case.</paragraph>
<paragraph><location><page_34><loc_27><loc_53><loc_89><loc_56></location>A companion video can be found at Credit Card Fraud Detection by using Snap ML on IBM Cloud Pak for Data on IBM Z and IBM LinuxONE.</paragraph>
<subtitle-level-1><location><page_34><loc_11><loc_47><loc_22><loc_49></location>Summary</subtitle-level-1>
<paragraph><location><page_34><loc_22><loc_32><loc_89><loc_45></location>This IBM Redbooksfi publication presented an overview of how IBM Cloud Pak for Data on IBM Z can modernize your data infrastructure; develop and deploy ML and AI models; and instantiate highly efficient analytics deployment on IBM LinuxONE. This publication demonstrated these tasks by guiding the reader through five common use cases where CP4D on IBM Z and IBM LinuxONE uses the different features that are supported on the platform, and showing how the associated features can help an enterprise to build AI and ML models with core transactional data, which results in a highly efficient analytics deployment that minimizes latency, cost inefficiencies, and potential security exposures that are connected with data transportation.</paragraph>
<subtitle-level-1><location><page_34><loc_11><loc_28><loc_19><loc_29></location>Authors</subtitle-level-1>
<paragraph><location><page_34><loc_22><loc_23><loc_88><loc_26></location>This publication was produced by a team of specialists from around the world working with the IBM Redbooks team:</paragraph>
<paragraph><location><page_34><loc_22><loc_15><loc_89><loc_22></location>Jasmeet Bhatia is an AI on IBM Z Product Manager who supports CP4D on IBM Z. She has 2.5 years of combined experience as a data scientist and a product manager. Jasmeet lives in San Francisco, California and holds a Bachelor of Arts degree in Data Science. She is working on her Master of Science degree in Data Science. Her area of expertise includes AI, data science, and product management.</paragraph>
<paragraph><location><page_35><loc_22><loc_82><loc_89><loc_91></location>Ravi Gummadi is a Technical Leader for CP4D on Linux on IBM Z and IBM LinuxONE in India. He has 18+ years of experience in the design and development of enterprise software for various platforms, including IBM Z and IBM LinuxONE. He holds a master's degree in computer science and engineering from the Indian Institute of Technology Madras (IIT Madras). His areas of expertise include compilers, virtualization, big data analytics, containers, data, and AI, with a special focus on open-source ecosystems.</paragraph>
@ -384,59 +403,56 @@
<paragraph><location><page_35><loc_22><loc_55><loc_89><loc_70></location>Srirama Sharma is a Lead Technical Architect for IBM Cloud Pak, IBM Instanafi, IBM Turbonomicfi, and Red Hat Advanced Cluster Management for Kubernetes (RHACM) on IBM Z and LinuxONE. He has 18+ years of experience in UNIX and Linux application and device driver development. He designs ISV solutions on IBM Systems and IBM Blockchainfi. He also works on cloud-native adoption of enterprise solutions on IBM Z and LinuxONE. Srirama holds a Bachelor of Engineering degree in computer science from Visvesvaraya Technological University (VTU). He lives in Bangalore, Karnataka. His areas of expertise include UNIX and Linux systems programming, virtualization, performance benchmarking of Financial Services Sector (FSS) industry solutions, open-source ecosystems, server infrastructure, and cloud-native adoption and modernization.</paragraph>
<paragraph><location><page_35><loc_22><loc_53><loc_71><loc_54></location>Thanks to the following people for their contributions to this project:</paragraph>
<paragraph><location><page_35><loc_22><loc_48><loc_51><loc_51></location>Lydia Parziale, Project Manager IBM Redbooks, Poughkeepsie Center</paragraph>
<paragraph><location><page_35><loc_22><loc_44><loc_59><loc_47></location>Shin Kelly Yang, AI on IBM Z Product Management IBM US</paragraph>
<paragraph><location><page_35><loc_22><loc_40><loc_88><loc_43></location>Tom Ramey, Anna Shugol, Andrew Sica, Jonathan Sloan, Elpida Tzortzatos, Meeta Vouk, IBM</paragraph>
<subtitle-level-1><location><page_35><loc_11><loc_36><loc_57><loc_37></location>Now you can become a published author, too!</subtitle-level-1>
<paragraph><location><page_35><loc_22><loc_24><loc_89><loc_34></location>Here's an opportunity to spotlight your skills, grow your career, and become a published author-all at the same time! Join an IBM Redbooks residency project and help write a book in your area of expertise, while honing your experience using leading-edge technologies. Your efforts will help to increase product acceptance and customer satisfaction, as you expand your network of technical contacts and relationships. Residencies run from two to six weeks in length, and you can participate either in person or as a remote resident working from your home base.</paragraph>
<paragraph><location><page_35><loc_22><loc_21><loc_89><loc_22></location>Find out more about the residency program, browse the residency index, and apply online at:</paragraph>
<paragraph><location><page_35><loc_22><loc_19><loc_49><loc_20></location>ibm.com /redbooks/residencies.html</paragraph>
<subtitle-level-1><location><page_36><loc_10><loc_89><loc_44><loc_91></location>Stay connected to IBM Redbooks</subtitle-level-1>
<paragraph><location><page_36><loc_22><loc_87><loc_39><loc_88></location>GLYPH<SM590000> Find us on LinkedIn:</paragraph>
<paragraph><location><page_36><loc_25><loc_84><loc_64><loc_86></location>http://www.linkedin.com/groups?home=&gid=2130806</paragraph>
<paragraph><location><page_36><loc_22><loc_81><loc_89><loc_83></location>GLYPH<SM590000> Explore new Redbooks publications, residencies, and workshops with the IBM Redbooks weekly newsletter:</paragraph>
<paragraph><location><page_36><loc_25><loc_79><loc_74><loc_80></location>https://www.redbooks.ibm.com/Redbooks.nsf/subscribe?OpenForm</paragraph>
<paragraph><location><page_36><loc_22><loc_76><loc_70><loc_78></location>GLYPH<SM590000> Stay current on recent Redbooks publications with RSS Feeds:</paragraph>
<paragraph><location><page_36><loc_25><loc_74><loc_54><loc_75></location>http://www.redbooks.ibm.com/rss.html</paragraph>
<subtitle-level-1><location><page_37><loc_11><loc_88><loc_25><loc_91></location>Notices</subtitle-level-1>
<paragraph><location><page_37><loc_10><loc_80><loc_89><loc_83></location>This information was developed for products and services offered in the US. This material might be available from IBM in other languages. However, you may be required to own a copy of the product or product version in that language in order to access it.</paragraph>
<paragraph><location><page_37><loc_10><loc_71><loc_89><loc_78></location>IBM may not offer the products, services, or features discussed in this document in other countries. Consult your local IBM representative for information on the products and services currently available in your area. Any reference to an IBM product, program, or service is not intended to state or imply that only that IBM product, program, or service may be used. Any functionally equivalent product, program, or service that does not infringe any IBM intellectual property right may be used instead. However, it is the user's responsibility to evaluate and verify the operation of any non-IBM product, program, or service.</paragraph>
<paragraph><location><page_37><loc_10><loc_66><loc_89><loc_69></location>IBM may have patents or pending patent applications covering subject matter described in this document. The furnishing of this document does not grant you any license to these patents. You can send license inquiries, in writing, to:</paragraph>
<paragraph><location><page_37><loc_10><loc_64><loc_87><loc_66></location>IBM Director of Licensing, IBM Corporation, North Castle Drive, MD-NC119, Armonk, NY 10504-1785, US</paragraph>
<paragraph><location><page_37><loc_10><loc_57><loc_89><loc_63></location>INTERNATIONAL BUSINESS MACHINES CORPORATION PROVIDES THIS PUBLICATION "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Some jurisdictions do not allow disclaimer of express or implied warranties in certain transactions, therefore, this statement may not apply to you.</paragraph>
<paragraph><location><page_37><loc_10><loc_51><loc_89><loc_56></location>This information could include technical inaccuracies or typographical errors. Changes are periodically made to the information herein; these changes will be incorporated in new editions of the publication. IBM may make improvements and/or changes in the product(s) and/or the program(s) described in this publication at any time without notice.</paragraph>
<paragraph><location><page_37><loc_10><loc_45><loc_88><loc_49></location>Any references in this information to non-IBM websites are provided for convenience only and do not in any manner serve as an endorsement of those websites. The materials at those websites are not part of the materials for this IBM product and use of those websites is at your own risk.</paragraph>
<paragraph><location><page_37><loc_10><loc_42><loc_85><loc_44></location>IBM may use or distribute any of the information you provide in any way it believes appropriate without incurring any obligation to you.</paragraph>
<paragraph><location><page_37><loc_10><loc_38><loc_83><loc_40></location>The performance data and client examples cited are presented for illustrative purposes only. Actual performance results may vary depending on specific configurations and operating conditions.</paragraph>
<paragraph><location><page_37><loc_10><loc_32><loc_89><loc_37></location>Information concerning non-IBM products was obtained from the suppliers of those products, their published announcements or other publicly available sources. IBM has not tested those products and cannot confirm the accuracy of performance, compatibility or any other claims related to non-IBM products. Questions on the capabilities of non-IBM products should be addressed to the suppliers of those products.</paragraph>
<paragraph><location><page_37><loc_10><loc_28><loc_89><loc_30></location>Statements regarding IBM's future direction or intent are subject to change or withdrawal without notice, and represent goals and objectives only.</paragraph>
<paragraph><location><page_37><loc_10><loc_21><loc_89><loc_26></location>This information contains examples of data and reports used in daily business operations. To illustrate them as completely as possible, the examples include the names of individuals, companies, brands, and products. All of these names are fictitious and any similarity to actual people or business enterprises is entirely coincidental.</paragraph>
<subtitle-level-1><location><page_37><loc_10><loc_19><loc_28><loc_20></location>COPYRIGHT LICENSE:</subtitle-level-1>
<paragraph><location><page_37><loc_10><loc_8><loc_89><loc_18></location>This information contains sample application programs in source language, which illustrate programming techniques on various operating platforms. You may copy, modify, and distribute these sample programs in any form without payment to IBM, for the purposes of developing, using, marketing or distributing application programs conforming to the application programming interface for the operating platform for which the sample programs are written. These examples have not been thoroughly tested under all conditions. IBM, therefore, cannot guarantee or imply reliability, serviceability, or function of these programs. The sample programs are provided "AS IS", without warranty of any kind. IBM shall not be liable for any damages arising out of your use of the sample programs.</paragraph>
<subtitle-level-1><location><page_38><loc_10><loc_89><loc_25><loc_91></location>Trademarks</subtitle-level-1>
<paragraph><location><page_38><loc_10><loc_82><loc_89><loc_87></location>IBM, the IBM logo, and ibm.com are trademarks or registered trademarks of International Business Machines Corporation, registered in many jurisdictions worldwide. Other product and service names might be trademarks of IBM or other companies. A current list of IBM trademarks is available on the web at "Copyright and trademark information" at http://www.ibm.com/legal/copytrade.shtml</paragraph>
<paragraph><location><page_38><loc_10><loc_78><loc_89><loc_81></location>The following terms are trademarks or registered trademarks of International Business Machines Corporation, and might also be trademarks or registered trademarks in other countries.</paragraph>
<table>
<location><page_38><loc_12><loc_69><loc_80><loc_77></location>
<row_0><col_0><body>Db2fi IBMfi</col_0><col_1><body>IBM Watsonfi</col_1><col_2><body>Redbooks (log o) fi Turbon</col_2></row_0>
<row_1><col_0><body></col_0><col_1><body>IBM z16™</col_1><col_2><body>omicfi</col_2></row_1>
<row_2><col_0><body>IBM Blockchainfi</col_0><col_1><body>Instanafi</col_1><col_2><body>WebSpherefi</col_2></row_2>
<row_3><col_0><body>IBM Cloudfi IBM Clou</col_0><col_1><body>Open Libertyfi</col_1><col_2><body>z/OSfi</col_2></row_3>
<row_4><col_0><body>d Pakfi</col_0><col_1><body>OpenPagesfi</col_1><col_2><body>z16™</col_2></row_4>
<row_5><col_0><body>IBM Telum™</col_0><col_1><body>Redbooksfi</col_1><col_2><body></col_2></row_5>
</table>
<paragraph><location><page_38><loc_12><loc_69><loc_24><loc_77></location>Db2fi IBMfi IBM Blockchainfi IBM Cloudfi IBM Clou d Pakfi IBM Telum™</paragraph>
<paragraph><location><page_38><loc_39><loc_69><loc_48><loc_77></location>IBM Watsonfi IBM z16™ Instanafi Open Libertyfi OpenPagesfi Redbooksfi</paragraph>
<paragraph><location><page_38><loc_65><loc_70><loc_80><loc_77></location>Redbooks (log o) fi Turbon omicfi WebSpherefi z/OSfi z16™</paragraph>
<paragraph><location><page_38><loc_10><loc_66><loc_51><loc_67></location>The following terms are trademarks of other companies:</paragraph>
<paragraph><location><page_38><loc_10><loc_62><loc_86><loc_65></location>Intel, Intel logo, Intel Inside logo, and Intel Centrino logo are trademarks or registered trademarks of Intel Corporation or its subsidiaries in the United States and other countries.</paragraph>
<paragraph><location><page_38><loc_10><loc_59><loc_89><loc_61></location>The registered trademark Linuxfi is used pursuant to a sublicense from the Linux Foundation, the exclusive licensee of Linus Torvalds, owner of the mark on a worldwide basis.</paragraph>
<paragraph><location><page_38><loc_11><loc_55><loc_87><loc_57></location>Red Hat and OpenShift are trademarks or registered trademarks of Red Hat, Inc. or its subsidiaries in the United States and other countries.</paragraph>
<paragraph><location><page_38><loc_11><loc_52><loc_77><loc_53></location>UNIX is a registered trademark of The Open Group in the United States and other countries.</paragraph>
<paragraph><location><page_38><loc_10><loc_50><loc_76><loc_51></location>Other company, product, or service names may be trademarks or service marks of others.</paragraph>
<figure>
<location><page_40><loc_7><loc_2><loc_11><loc_5></location>
</figure>
<paragraph><location><page_40><loc_47><loc_94><loc_68><loc_96></location>Back cover</paragraph>
<figure>
<location><page_40><loc_78><loc_90><loc_93><loc_95></location>
</figure>
<paragraph><location><page_40><loc_81><loc_85><loc_92><loc_86></location>REDP-5695-00</paragraph>
<paragraph><location><page_40><loc_79><loc_82><loc_92><loc_83></location>ISBN 0738461067</paragraph>
<paragraph><location><page_40><loc_81><loc_11><loc_92><loc_12></location>Printed in U.S.A.</paragraph>
<figure>
<location><page_40><loc_71><loc_2><loc_94><loc_7></location>
</figure>

File diff suppressed because one or more lines are too long

View File

@ -5,6 +5,27 @@ Front cover
## IBM Cloud Pak for Data on IBM Z
Jasmeet Bhatia
Ravi Gummadi
Srirama Sharma
<!-- image -->
<!-- image -->
<!-- image -->
<!-- image -->
<!-- image -->
<!-- image -->
@ -180,7 +201,9 @@ For the airline industry, processes such as air traffic management, flight manag
In the following sections, we describe the following use cases:
GLYPH<SM590000> "Use case 1: Responsible AI augmented with risk and regulatory compliance" on page 12 AI model lifecycle governance, risk management, and regulatory compliance are key to the success of the enterprises. It is imperative to adopt a typical AI model lifecycle to protect new end-to-end risks.
GLYPH<SM590000> "Use case 1: Responsible AI augmented with risk and regulatory compliance" on page 12
AI model lifecycle governance, risk management, and regulatory compliance are key to the success of the enterprises. It is imperative to adopt a typical AI model lifecycle to protect new end-to-end risks.
GLYPH<SM590000> "Use case 2: Credit default risk assessment" on page 22
@ -190,7 +213,9 @@ GLYPH<SM590000> "Use case 3: Clearing and settlement" on page 25
The use of AI can help to predict which trades or transactions have high risk exposures, and propose solutions for a more efficient settlement process.
GLYPH<SM590000> "Use case 4: Remaining Useful Life of an aircraft engine" on page 27 We describe how AI can help to avoid unplanned aircraft downtime by determining the remaining time or cycles that an aircraft engine is likely to operate before failure.
GLYPH<SM590000> "Use case 4: Remaining Useful Life of an aircraft engine" on page 27
We describe how AI can help to avoid unplanned aircraft downtime by determining the remaining time or cycles that an aircraft engine is likely to operate before failure.
GLYPH<SM590000> "Use case 5: AI-powered video analytics on an infant's motions for health prediction" on page 30
@ -530,7 +555,9 @@ Video processing a stream of data from surveillance systems and then performing
AI is the current "market trend evolution" in video analytics and advancing the decision-making capabilities of the human mind. DL-based computer vision AI techniques are being widely adopted by various industries to solve real-time problems. These techniques improve the detection and prediction accuracy without increasing the hardware cost exponentially. For users, AI greatly reduces the workload of the monitoring staff and provides benefits by detecting unusual incidents and solving many video forensic problems.
S
CP4D was used to build and deploy the AI-powered video analytics on infant's motion for health prediction use case on IBM Z. IBM Z with AI accelerator enables faster inference for detecting face and body movements and performing angle analytics in real time.
Figure 24 shows an architectural diagram about how to design and develop an AI model for real-time body pose detection on IBM Z. A deep convolutional neural network architecture was trained on the task of infant pose estimation on the custom data set by leveraging IBM Cloud Pak for Data.
Figure 24 Architecture for AI-powered video analytics
<!-- image -->
@ -549,10 +576,6 @@ GLYPH<SM590000> Mediapipe: A library that helps with video streaming processing
GLYPH<SM590000> OpenCV: A real-time computer vision library that helps perform image processing.
WML was used for deployment of the pose detection model and generated notifications to users with web and mobile applications, and it integrates with Fitbit for push notifications so that hospitals and parents can take preventive actions.
## Additional resources
@ -653,14 +676,11 @@ IBM, the IBM logo, and ibm.com are trademarks or registered trademarks of Intern
The following terms are trademarks or registered trademarks of International Business Machines Corporation, and might also be trademarks or registered trademarks in other countries.
Db2fi IBMfi IBM Blockchainfi IBM Cloudfi IBM Clou d Pakfi IBM Telum™
| Db2fi IBMfi | IBM Watsonfi | Redbooks (log o) fi Turbon |
|----------------------|----------------|------------------------------|
| | IBM z16™ | omicfi |
| IBM Blockchainfi | Instanafi | WebSpherefi |
| IBM Cloudfi IBM Clou | Open Libertyfi | z/OSfi |
| d Pakfi | OpenPagesfi | z16™ |
| IBM Telum™ | Redbooksfi | |
IBM Watsonfi IBM z16™ Instanafi Open Libertyfi OpenPagesfi Redbooksfi
Redbooks (log o) fi Turbon omicfi WebSpherefi z/OSfi z16™
The following terms are trademarks of other companies:
@ -679,6 +699,9 @@ Other company, product, or service names may be trademarks or service marks of o
Back cover
<!-- image -->
REDP-5695-00
ISBN 0738461067

File diff suppressed because one or more lines are too long

View File

@ -77,6 +77,8 @@ def verify_tables(doc_pred: DsDocument, doc_true: DsDocument):
assert doc_pred.tables is not None, "no tables predicted, but expected in doc_true"
# print("Expected number of tables: {}, result: {}".format(len(doc_true.tables), len(doc_pred.tables)))
assert len(doc_true.tables) == len(
doc_pred.tables
), "document has different count of tables than expected."
@ -96,8 +98,9 @@ def verify_tables(doc_pred: DsDocument, doc_true: DsDocument):
for i, row in enumerate(true_item.data):
for j, col in enumerate(true_item.data[i]):
# print("true: ", true_item.data[i][j])
# print("pred: ", pred_item.data[i][j])
# print("true: ", true_item.data[i][j].text)
# print("pred: ", pred_item.data[i][j].text)
# print("")
assert (
true_item.data[i][j].text == pred_item.data[i][j].text