Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2026-01-29 05:07:13 +08:00)
[Bugfix] Update Gradio OpenAI Chatbot Webserver example to new Gradio message history format (#29249)
Signed-off-by: joshiemoore <joshiemoore98@gmail.com>
This commit is contained in:
parent: 3e1ad40655
commit: c309bb5245
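For context: newer Gradio versions pass chat history to the ChatInterface callback as OpenAI-style message dictionaries (the "messages" format) rather than as (user, assistant) tuple pairs, which is why the tuple-unpacking helper removed below no longer applies. A minimal sketch of the two shapes, with illustrative values only:

# Legacy Gradio history format: list of [user, assistant] pairs
history_tuples = [
    ["Hello!", "Hi, how can I help?"],
]

# New Gradio history format: already OpenAI-style role/content dicts,
# so it can be spread directly into the chat.completions request
history_messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi, how can I help?"},
]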
@@ -25,25 +25,17 @@ import gradio as gr
 from openai import OpenAI
 
 
-def format_history_to_openai(history):
-    history_openai_format = [
-        {"role": "system", "content": "You are a great AI assistant."}
-    ]
-    for human, assistant in history:
-        history_openai_format.append({"role": "user", "content": human})
-        history_openai_format.append({"role": "assistant", "content": assistant})
-    return history_openai_format
-
-
 def predict(message, history, client, model_name, temp, stop_token_ids):
-    # Format history to OpenAI chat format
-    history_openai_format = format_history_to_openai(history)
-    history_openai_format.append({"role": "user", "content": message})
+    messages = [
+        {"role": "system", "content": "You are a great AI assistant."},
+        *history,
+        {"role": "user", "content": message},
+    ]
 
     # Send request to OpenAI API (vLLM server)
     stream = client.chat.completions.create(
         model=model_name,
-        messages=history_openai_format,
+        messages=messages,
         temperature=temp,
         stream=True,
         extra_body={
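As a usage sketch (not part of the diff above), the updated predict would be driven by a ChatInterface configured for the new message format. The client wiring, model name, and sampling values below are illustrative assumptions, not lines from the example:

from functools import partial

import gradio as gr
from openai import OpenAI

# Illustrative wiring; the base_url, api_key, and model name are assumptions
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

demo = gr.ChatInterface(
    fn=partial(
        predict,
        client=client,
        model_name="meta-llama/Llama-3.2-1B-Instruct",  # assumed model
        temp=0.8,
        stop_token_ids="",
    ),
    type="messages",  # history arrives as OpenAI-style role/content dicts
)
demo.launch()

With type="messages", the history argument received by predict is already a list of {"role": ..., "content": ...} dicts, so spreading it with *history into the request body is all the formatting that is needed.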