[Doc] Fix prompt embedding examples (#18350)

Signed-off-by: wangli <wangli858794774@gmail.com>
Author: Li Wang
Date: 2025-05-19 21:48:16 +08:00 (committed by GitHub)
parent d637b96099
commit c5bb0ebdc6


@@ -36,6 +36,7 @@ llm = LLM(model=model_name, enable_prompt_embeds=True)
 
 chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
 token_ids = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors='pt')
+embedding_layer = transformers_model.get_input_embeddings()
 prompt_embeds = embedding_layer(token_ids).squeeze(0)
 
 # Single prompt inference
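
For reference, here is a runnable sketch of the offline example as it reads once this hunk is applied. Only the lines visible in the hunk come from the patched doc; the model name, the imports, and the generate/print calls at the end are assumptions added here for completeness:

    import transformers
    from vllm import LLM

    # Assumed model; the diff only shows model_name being used, not its value.
    model_name = "meta-llama/Llama-3.2-1B-Instruct"

    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    transformers_model = transformers.AutoModelForCausalLM.from_pretrained(model_name)

    # From the hunk header: prompt-embeds input must be enabled at construction.
    llm = LLM(model=model_name, enable_prompt_embeds=True)

    chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
    token_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors='pt'
    )

    # The line this patch adds; without it, embedding_layer is undefined below.
    embedding_layer = transformers_model.get_input_embeddings()
    # squeeze(0) drops the batch axis: one prompt is a (seq_len, hidden_size) tensor.
    prompt_embeds = embedding_layer(token_ids).squeeze(0)

    # Single prompt inference (the generate call is assumed from the doc's pattern).
    outputs = llm.generate({"prompt_embeds": prompt_embeds})
    print(outputs[0].outputs[0].text)

The squeeze(0) matters because apply_chat_template(..., return_tensors='pt') returns a batched (1, seq_len) tensor, so the embedding layer produces a (1, seq_len, hidden_size) output, while vLLM takes one 2-D (seq_len, hidden_size) tensor per prompt.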
@@ -116,6 +117,7 @@ transformers_model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
 
 chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
 token_ids = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors='pt')
+embedding_layer = transformers_model.get_input_embeddings()
 prompt_embeds = embedding_layer(token_ids).squeeze(0)
 
 # Prompt embeddings
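
The second hunk makes the same fix in the doc's online-serving example. As a hedged sketch of how those embeddings then reach a running server: the transport below (torch.save into a buffer, base64 in the request body, the prompt_embeds extra_body key, and the --enable-prompt-embeds server flag) is reconstructed from the surrounding doc rather than from this diff, so treat it as an assumption:

    # Assumed server invocation:
    #   vllm serve meta-llama/Llama-3.2-1B-Instruct --enable-prompt-embeds
    import base64
    import io

    import torch
    import transformers
    from openai import OpenAI

    client = OpenAI(api_key="EMPTY", base_url="http://localhost:8000/v1")
    model_name = "meta-llama/Llama-3.2-1B-Instruct"  # assumed, as above

    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    transformers_model = transformers.AutoModelForCausalLM.from_pretrained(model_name)

    chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
    token_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors='pt'
    )

    # The patched lines: define the embedding layer, then embed the tokens.
    embedding_layer = transformers_model.get_input_embeddings()
    prompt_embeds = embedding_layer(token_ids).squeeze(0)

    # Serialize the tensor and base64-encode it so it fits in a JSON body.
    buffer = io.BytesIO()
    torch.save(prompt_embeds, buffer)
    encoded_embeds = base64.b64encode(buffer.getvalue()).decode("utf-8")

    completion = client.completions.create(
        model=model_name,
        prompt="",  # the OpenAI client rejects None; an empty string stands in
        max_tokens=32,
        extra_body={"prompt_embeds": encoded_embeds},  # assumed key, per the doc's pattern
    )
    print(completion.choices[0].text)

Serializing with torch.save keeps the tensor's dtype and shape intact across the wire, which a plain JSON list of floats would not guarantee.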