
* feat: adding new vlm-models support
* fixed the transformers
* got microsoft/Phi-4-multimodal-instruct to work
* working on vlm's
* refactoring the VLM part
* all working, now serious refactoring necessary
* refactoring the download_model
* added the formulate_prompt
* pixtral 12b runs via MLX and native transformers
* added the VlmPredictionToken
* refactoring minimal_vlm_pipeline
* fixed the MyPy
* added pipeline_model_specializations file
* need to get Phi4 working again ...
* finalising last points for vlms support
* fixed the pipeline for Phi4
* streamlining all code
* reformatted the code
* fixing the tests
* added the html backend to the VLM pipeline
* fixed the static load_from_doctags
* restore stable imports
* use AutoModelForVision2Seq for Pixtral and review example (including rename)
* remove unused value
* refactor instances of VLM models
* skip compare example in CI
* use lowercase and uppercase only
* add new minimal_vlm example and refactor pipeline_options_vlm_model for cleaner import
* rename pipeline_vlm_model_spec
* move more arguments to options and simplify model init
* add supported_devices
* remove not-needed function
* exclude minimal_vlm
* missing file
* add message for transformers version
* rename to specs
* use module import and remove MLX from non-darwin
* remove hf_vlm_model and add extra_generation_args
* use single HF VLM model class
* remove torch type
* add docs for vision models

---------

Signed-off-by: Peter Staar <taa@zurich.ibm.com>
Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
Co-authored-by: Michele Dolfi <dol@zurich.ibm.com>
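The squashed history above lands VLM (vision-language model) support: new model specs, a refactored VLM pipeline, MLX and transformers backends, and docs. For orientation, a minimal usage sketch of the resulting pipeline follows; it assumes the public docling API as of this change, and the input path and default-model behavior are illustrative assumptions, not part of the file below.

from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

# Default options; a built-in local VLM is used unless vlm_options is overridden.
pipeline_options = VlmPipelineOptions()

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        )
    }
)

result = converter.convert("document.pdf")  # hypothetical input file
print(result.document.export_to_markdown())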
from collections.abc import Iterable
from concurrent.futures import ThreadPoolExecutor

from docling.datamodel.base_models import Page, VlmPrediction
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions
from docling.exceptions import OperationNotAllowed
from docling.models.base_model import BasePageModel
from docling.utils.api_image_request import api_image_request
from docling.utils.profiling import TimeRecorder

class ApiVlmModel(BasePageModel):
    def __init__(
        self,
        enabled: bool,
        enable_remote_services: bool,
        vlm_options: ApiVlmOptions,
    ):
        self.enabled = enabled
        self.vlm_options = vlm_options
        if self.enabled:
            if not enable_remote_services:
                raise OperationNotAllowed(
                    "Connections to remote services are only allowed when enabled "
                    "explicitly: set pipeline_options.enable_remote_services=True, "
                    "or use the CLI flag --enable-remote-services."
                )

            self.timeout = self.vlm_options.timeout
            self.concurrency = self.vlm_options.concurrency
            self.prompt_content = (
                f"This is a page from a document.\n{self.vlm_options.prompt}"
            )
            # Merge user-supplied request params; force temperature=0 for
            # deterministic output.
            self.params = {
                **self.vlm_options.params,
                "temperature": 0,
            }
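            # Illustration (hypothetical values): a local OpenAI-compatible
            # endpoint, e.g. an Ollama server, could be configured as
            #   ApiVlmOptions(
            #       url="http://localhost:11434/v1/chat/completions",
            #       params=dict(model="granite3.2-vision:2b"),
            #       prompt="OCR the full page to markdown.",
            #       timeout=90,
            #   )
            # in which case self.params sends {"model": ..., "temperature": 0}.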

    def __call__(
        self, conv_res: ConversionResult, page_batch: Iterable[Page]
    ) -> Iterable[Page]:
        def _vlm_request(page):
            assert page._backend is not None
            if not page._backend.is_valid():
                return page

            with TimeRecorder(conv_res, "vlm"):
                assert page.size is not None

                hi_res_image = page.get_image(scale=self.vlm_options.scale)
                assert hi_res_image is not None
                # The remote API expects RGB input.
                if hi_res_image.mode != "RGB":
                    hi_res_image = hi_res_image.convert("RGB")

                page_tags = api_image_request(
                    image=hi_res_image,
                    prompt=self.prompt_content,
                    url=self.vlm_options.url,
                    timeout=self.timeout,
                    headers=self.vlm_options.headers,
                    **self.params,
                )

                page.predictions.vlm_response = VlmPrediction(text=page_tags)

            return page

        with ThreadPoolExecutor(max_workers=self.concurrency) as executor:
            yield from executor.map(_vlm_request, page_batch)
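Because executor.map preserves input order, pages are yielded in the order of page_batch even though requests run concurrently, with at most self.concurrency requests in flight. A sketch of wiring this model into a conversion run is below; the endpoint URL and model id are placeholder assumptions, the ApiVlmOptions fields are those this class reads, and the VlmPipelineOptions wiring assumes the pipeline options introduced in this PR.

from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

pipeline_options = VlmPipelineOptions(
    enable_remote_services=True,  # required, or ApiVlmModel raises OperationNotAllowed
)
pipeline_options.vlm_options = ApiVlmOptions(
    url="http://localhost:11434/v1/chat/completions",  # placeholder: local Ollama endpoint
    params={"model": "granite3.2-vision:2b"},  # placeholder model id
    prompt="Convert this page to markdown.",
    timeout=90,
)

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        )
    }
)
doc = converter.convert("report.pdf").document  # hypothetical input file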