mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-03-27 06:52:32 +08:00
[Misc] Slight spelling modification (#18039)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
This commit is contained in:
parent
60f7624334
commit
c06af9a959
@ -74,7 +74,7 @@ vLLM is flexible and easy to use with:
|
||||
- OpenAI-compatible API server
|
||||
- Support NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, TPU, and AWS Neuron.
|
||||
- Prefix caching support
|
||||
- Multi-lora support
|
||||
- Multi-LoRA support
|
||||
|
||||
vLLM seamlessly supports most popular open-source models on HuggingFace, including:
|
||||
- Transformer-like LLMs (e.g., Llama)
|
||||
|
||||
@ -119,7 +119,7 @@ class LogitsProcessor(nn.Module):
|
||||
|
||||
def extra_repr(self) -> str:
    """Return the extra attribute summary shown in this module's repr().

    Builds the ``key=value`` string that ``nn.Module.__repr__`` appends
    inside the module's parentheses, covering vocab sizes and the
    logits-processing configuration.

    Returns:
        A comma-separated string of the form
        ``"vocab_size=..., org_vocab_size=..., scale=..., logits_as_input=..."``.
    """
    s = f"vocab_size={self.vocab_size}"
    # Fixed label: was misspelled "forg_vocab_size" (vllm-project/vllm #18039).
    s += f", org_vocab_size={self.org_vocab_size}"
    s += f", scale={self.scale}, logits_as_input={self.logits_as_input}"
    return s
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user