[CI/Build] Test both text and token IDs in batched OpenAI Completions API (#5568)

Cyrus Leung 2024-06-15 19:29:42 +08:00 committed by GitHub
parent 0e9164b40a
commit 81fbb3655f


@@ -655,10 +655,12 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
     [MODEL_NAME, "zephyr-lora"],
 )
 async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
+    # test both text and token IDs
+    for prompts in (["Hello, my name is"] * 2, [[0, 0, 0, 0, 0]] * 2):
         # test simple list
         batch = await client.completions.create(
             model=model_name,
-            prompt=["Hello, my name is", "Hello, my name is"],
+            prompt=prompts,
             max_tokens=5,
             temperature=0.0,
         )
@@ -668,7 +670,7 @@ async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
         # test n = 2
         batch = await client.completions.create(
             model=model_name,
-            prompt=["Hello, my name is", "Hello, my name is"],
+            prompt=prompts,
             n=2,
             max_tokens=5,
             temperature=0.0,
@@ -688,7 +690,7 @@ async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str):
         # test streaming
         batch = await client.completions.create(
             model=model_name,
-            prompt=["Hello, my name is", "Hello, my name is"],
+            prompt=prompts,
             max_tokens=5,
             temperature=0.0,
             stream=True,
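
For context, a minimal sketch of what the updated test exercises: the batched Completions endpoint accepts either a list of text prompts or a list of token-ID lists in the prompt field. The base_url, api_key, and model name below are assumptions for a locally running vLLM OpenAI-compatible server, not values taken from this commit.

import asyncio

import openai


async def main() -> None:
    # Assumed local vLLM OpenAI-compatible server; adjust base_url/api_key as needed.
    client = openai.AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

    # Same pattern as the test: a batch of text prompts, then a batch of token-ID lists.
    for prompts in (["Hello, my name is"] * 2, [[0, 0, 0, 0, 0]] * 2):
        batch = await client.completions.create(
            model="HuggingFaceH4/zephyr-7b-beta",  # assumed model name; use whatever the server serves
            prompt=prompts,
            max_tokens=5,
            temperature=0.0,
        )
        # One choice is returned per prompt in the simple batched case.
        assert len(batch.choices) == 2


asyncio.run(main())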