mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-01-07 21:23:08 +08:00
[Bugfix][VLM] fix llava processor (#15285)
Signed-off-by: Mengqing Cao <cmq0113@163.com>
This commit is contained in:
parent
93a00d7dde
commit
c21b99b912
@ -233,7 +233,13 @@ class LlavaDummyInputsBuilder(BaseDummyInputsBuilder[_I]):
|
||||
class LlavaProcessingInfo(BaseLlavaProcessingInfo):
    """Processing info for LLaVA models backed by HF's ``LlavaProcessor``."""

    def get_hf_processor(self, **kwargs: object):
        """Return the HF ``LlavaProcessor``, backfilling ``patch_size``.

        Some checkpoints omit ``patch_size`` from ``processor_config.json``
        (e.g. E5-V: https://huggingface.co/royokong/e5-v), which leaves
        ``hf_processor.patch_size`` as ``None``. In that case, fall back to
        the patch size reported by the vision encoder config so downstream
        token-count computations do not fail.
        """
        hf_processor = self.ctx.get_hf_processor(LlavaProcessor, **kwargs)

        # In case patch_size is omitted from `processor_config.json`
        # e.g. for E5-V: https://huggingface.co/royokong/e5-v
        if hf_processor.patch_size is None:
            patch_size = self.get_vision_encoder_info().get_patch_size()
            hf_processor.patch_size = patch_size

        return hf_processor
|
||||
|
||||
|
||||
class BaseLlavaMultiModalProcessor(BaseMultiModalProcessor[_I]):
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user