Replace {func} with mkdocs style links (#18610)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
commit 2edb533af2
parent 38a95cb4a8
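The change is purely a docstring cross-reference migration: the Sphinx/MyST `{func}` role is swapped for the identifier-link form that mkdocstrings/autorefs resolves. A minimal illustration of the two styles, using a made-up target name (`prepare_inputs`) rather than anything from vLLM:

```python
def example_old() -> None:
    """Sphinx/MyST style: see {func}`prepare_inputs` for details."""


def example_new() -> None:
    """mkdocs (mkdocstrings/autorefs) style: see [prepare_inputs][] for details."""
```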
@@ -540,7 +540,7 @@ class LlavaNextForConditionalGeneration(nn.Module, SupportsMultiModal,
         Unlike in LLaVA-1.5, the number of image tokens inputted to the language
         model depends on the original size of the input image. Including the
         original image token in the input, the required number of image tokens
-        is given by {func}`get_llava_next_image_feature_size`.
+        is given by [get_llava_next_image_feature_size][].
 
         This way, the `positions` and `attn_metadata` are consistent
         with the `input_ids`.
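The invariant behind this docstring is that the number of image placeholder tokens already present in `input_ids` must equal the feature size computed by the helper it links to. A toy consistency check, with `IMAGE_TOKEN_ID` and the expected count as made-up values rather than vLLM's actual API:

```python
import torch

# Toy values for illustration only; vLLM derives the real feature size from
# the HF config and the original image resolution via the helper named above.
IMAGE_TOKEN_ID = 32000
expected_image_tokens = 3

input_ids = torch.tensor(
    [1, 306, IMAGE_TOKEN_ID, IMAGE_TOKEN_ID, IMAGE_TOKEN_ID, 2])
num_placeholders = int((input_ids == IMAGE_TOKEN_ID).sum())

# One image embedding is scattered into each placeholder slot, so the two
# counts must agree for `positions` and `attn_metadata` to stay consistent
# with `input_ids`.
assert num_placeholders == expected_image_tokens
```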
@@ -387,7 +387,7 @@ _M = TypeVar("_M", bound=Union[_HasModalityAttr, _HasModalityProp])
 
 
 def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]:
-    """Convenience function to apply {func}`full_groupby` based on modality."""
+    """Convenience function to apply [full_groupby][] based on modality."""
     return full_groupby(values, key=lambda x: x.modality)
 
 
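As a rough sketch of what the linked helper does: group an iterable of multimodal items by their `modality` attribute, keeping every group. The `full_groupby` below is a simplified stand-in written for this note, not vLLM's implementation:

```python
from collections import defaultdict
from dataclasses import dataclass
from typing import Callable, Iterable, TypeVar

_T = TypeVar("_T")
_K = TypeVar("_K")


# Stand-in for vLLM's `full_groupby`: unlike `itertools.groupby`, it does not
# require the input to be pre-sorted by key.
def full_groupby(values: Iterable[_T], *, key: Callable[[_T], _K]):
    groups: dict[_K, list[_T]] = defaultdict(list)
    for value in values:
        groups[key(value)].append(value)
    return groups.items()


@dataclass
class Item:
    modality: str
    data: object


items = [Item("image", 0), Item("audio", 1), Item("image", 2)]
for modality, group in full_groupby(items, key=lambda x: x.modality):
    print(modality, len(group))  # image 2, then audio 1
```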
@@ -157,7 +157,7 @@ class Platform:
         return self._enum == PlatformEnum.OOT
 
     def is_cuda_alike(self) -> bool:
-        """Stateless version of {func}`torch.cuda.is_available`."""
+        """Stateless version of [torch.cuda.is_available][]."""
         return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM)
 
     def is_sleep_mode_available(self) -> bool:
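For context on why the docstring calls this a "stateless" check: the answer is derived from an enum captured when the platform object is created, rather than by querying the CUDA runtime at call time. A simplified sketch of that pattern (not vLLM's actual class, which carries much more):

```python
from enum import Enum


class PlatformEnum(Enum):
    CUDA = "cuda"
    ROCM = "rocm"
    CPU = "cpu"


class Platform:
    def __init__(self, enum: PlatformEnum) -> None:
        # The platform kind is fixed up front, so later queries never need
        # to touch torch.cuda.
        self._enum = enum

    def is_cuda_alike(self) -> bool:
        return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM)


assert Platform(PlatformEnum.ROCM).is_cuda_alike()
assert not Platform(PlatformEnum.CPU).is_cuda_alike()
```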
@@ -194,7 +194,7 @@ class Platform:
         cls,
         device_id: int = 0,
     ) -> Optional[DeviceCapability]:
-        """Stateless version of {func}`torch.cuda.get_device_capability`."""
+        """Stateless version of [torch.cuda.get_device_capability][]."""
         return None
 
     @classmethod
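The base class shown here deliberately returns None; concrete platforms override it. A hedged sketch of what such an override could look like on a CUDA box, with `DeviceCapability` stubbed as a plain named tuple so the example is self-contained (vLLM's per-platform implementations may query the device differently):

```python
from typing import NamedTuple, Optional

import torch


# Stand-in for vLLM's DeviceCapability named tuple.
class DeviceCapability(NamedTuple):
    major: int
    minor: int


def get_device_capability(device_id: int = 0) -> Optional[DeviceCapability]:
    # Mirror the base-class default: no capability notion without CUDA.
    if not torch.cuda.is_available():
        return None
    major, minor = torch.cuda.get_device_capability(device_id)
    return DeviceCapability(major=major, minor=minor)


print(get_device_capability())  # e.g. DeviceCapability(major=9, minor=0) on H100
```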
@@ -27,7 +27,7 @@ VLLM_INVALID_TOKEN_ID = -1
 
 
 def array_full(token_id: int, count: int):
-    """{class}`array` equivalent of {func}`numpy.full`."""
+    """{class}`array` equivalent of [numpy.full][]."""
     return array(VLLM_TOKEN_ID_ARRAY_TYPE, [token_id]) * count
 
 
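The one-line body relies on sequence repetition: multiplying a single-element `array.array` by `count` repeats that element, which is the stdlib counterpart of `numpy.full`. A small comparison, with the typecode assumed for the sake of a self-contained example:

```python
from array import array

import numpy as np

# "l" (signed long) is assumed here as the token-id typecode for illustration.
VLLM_TOKEN_ID_ARRAY_TYPE = "l"

# Repeating a one-element array mirrors numpy.full, but yields a stdlib
# array.array instead of an ndarray.
tokens = array(VLLM_TOKEN_ID_ARRAY_TYPE, [42]) * 4
reference = np.full(4, 42)

assert list(tokens) == reference.tolist() == [42, 42, 42, 42]
```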
@@ -66,7 +66,7 @@ def gather_mm_placeholders(
     """
     Reconstructs the embeddings from the placeholder tokens.
 
-    This is the operation of {func}`scatter_mm_placeholders`.
+    This is the operation of [scatter_mm_placeholders][].
     """
     if is_embed is None:
         return placeholders
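To make the scatter/gather pairing concrete: gathering recovers the embeddings that were previously spread across placeholder positions, with `is_embed` (when given) marking which positions actually carry embeddings. A toy round trip under those assumptions, not vLLM's exact code:

```python
import torch

# Illustrative sketch of the scatter/gather relationship: `is_embed` flags
# which placeholder slots hold real multimodal embeddings.
embeds = torch.arange(6, dtype=torch.float32).reshape(3, 2)  # 3 embeddings
is_embed = torch.tensor([True, False, True, False, True])    # 5 placeholder slots

# Scatter: place each embedding into the slots flagged by the mask.
scattered = torch.zeros(is_embed.numel(), embeds.shape[1])
scattered[is_embed] = embeds

# Gather: recover the original embeddings from the scattered layout.
gathered = scattered[is_embed]
assert torch.equal(gathered, embeds)
```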