
* feat: adding new vlm-models support Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* fixed the transformers Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* got microsoft/Phi-4-multimodal-instruct to work Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* working on vlm's Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* refactoring the VLM part Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* all working, now serious refactoring necessary Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* refactoring the download_model Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* added the formulate_prompt Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* pixtral 12b runs via MLX and native transformers Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* added the VlmPredictionToken Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* refactoring minimal_vlm_pipeline Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* fixed the MyPy Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* added pipeline_model_specializations file Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* need to get Phi4 working again ... Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* finalising last points for vlms support Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* fixed the pipeline for Phi4 Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* streamlining all code Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* reformatted the code Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* fixing the tests Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* added the html backend to the VLM pipeline Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* fixed the static load_from_doctags Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* restore stable imports Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* use AutoModelForVision2Seq for Pixtral and review example (including rename) Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* remove unused value Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* refactor instances of VLM models Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* skip compare example in CI Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* use lowercase and uppercase only Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* add new minimal_vlm example and refactor pipeline_options_vlm_model for cleaner import Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* rename pipeline_vlm_model_spec Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* move more arguments to options and simplify model init Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* add supported_devices Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* remove not-needed function Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* exclude minimal_vlm Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* missing file Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* add message for transformers version Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* rename to specs Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* use module import and remove MLX from non-darwin Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* remove hf_vlm_model and add extra_generation_args Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* use single HF VLM model class Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* remove torch type Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* add docs for vision models Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>

---------

Signed-off-by: Peter Staar <taa@zurich.ibm.com>
Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
Co-authored-by: Michele Dolfi <dol@zurich.ibm.com>
from enum import Enum
from typing import Any, Dict, List, Literal

from pydantic import AnyUrl, BaseModel
from typing_extensions import deprecated

from docling.datamodel.accelerator_options import AcceleratorDevice


class BaseVlmOptions(BaseModel):
    kind: str
    prompt: str


class ResponseFormat(str, Enum):
    DOCTAGS = "doctags"
    MARKDOWN = "markdown"
    HTML = "html"


class InferenceFramework(str, Enum):
    MLX = "mlx"
    TRANSFORMERS = "transformers"


class TransformersModelType(str, Enum):
    AUTOMODEL = "automodel"
    AUTOMODEL_VISION2SEQ = "automodel-vision2seq"
    AUTOMODEL_CAUSALLM = "automodel-causallm"


class InlineVlmOptions(BaseVlmOptions):
    kind: Literal["inline_model_options"] = "inline_model_options"

    repo_id: str
    trust_remote_code: bool = False
    load_in_8bit: bool = True
    llm_int8_threshold: float = 6.0
    quantized: bool = False

    inference_framework: InferenceFramework
    transformers_model_type: TransformersModelType = TransformersModelType.AUTOMODEL
    response_format: ResponseFormat

    supported_devices: List[AcceleratorDevice] = [
        AcceleratorDevice.CPU,
        AcceleratorDevice.CUDA,
        AcceleratorDevice.MPS,
    ]

    scale: float = 2.0

    temperature: float = 0.0
    stop_strings: List[str] = []
    extra_generation_config: Dict[str, Any] = {}

    use_kv_cache: bool = True
    max_new_tokens: int = 4096

    @property
    def repo_cache_folder(self) -> str:
        return self.repo_id.replace("/", "--")


@deprecated("Use InlineVlmOptions instead.")
class HuggingFaceVlmOptions(InlineVlmOptions):
    pass


class ApiVlmOptions(BaseVlmOptions):
    kind: Literal["api_model_options"] = "api_model_options"

    url: AnyUrl = AnyUrl(
        "http://localhost:11434/v1/chat/completions"
    )  # Default to ollama
    headers: Dict[str, str] = {}
    params: Dict[str, Any] = {}
    scale: float = 2.0
    timeout: float = 60
    concurrency: int = 1
    response_format: ResponseFormat
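
A minimal usage sketch of the two option variants above, assuming the module is importable as docling.datamodel.pipeline_options_vlm_model (the import path mentioned in the commit log). The repo_id and the API model name below are hypothetical placeholders, not models shipped with Docling:

from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_vlm_model import (
    ApiVlmOptions,
    InferenceFramework,
    InlineVlmOptions,
    ResponseFormat,
    TransformersModelType,
)

# Inline (local) model: weights are fetched from the Hugging Face Hub and
# run in-process. "example-org/example-vlm" is a placeholder repo_id.
inline_opts = InlineVlmOptions(
    repo_id="example-org/example-vlm",
    prompt="Convert this page to docling.",
    inference_framework=InferenceFramework.TRANSFORMERS,
    transformers_model_type=TransformersModelType.AUTOMODEL_VISION2SEQ,
    response_format=ResponseFormat.DOCTAGS,
    supported_devices=[AcceleratorDevice.CPU, AcceleratorDevice.CUDA],
)
print(inline_opts.repo_cache_folder)  # -> "example-org--example-vlm"

# API-hosted model: the url defaults to a local Ollama endpoint
# (http://localhost:11434/v1/chat/completions). The "model" entry in
# `params` is an assumption about what the serving endpoint expects.
api_opts = ApiVlmOptions(
    prompt="Convert this page to markdown.",
    params={"model": "example-vision-model"},
    timeout=90,
    response_format=ResponseFormat.MARKDOWN,
)

Both variants share BaseVlmOptions, so a pipeline can accept either and dispatch on the kind discriminator ("inline_model_options" vs. "api_model_options").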