# SPDX-License-Identifier: Apache-2.0

from __future__ import annotations

import time
from collections import defaultdict, deque
from collections.abc import Iterable
from typing import Optional, Union

from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.factory import (
    KVConnectorFactory)
from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorRole
from vllm.logger import init_logger
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
from vllm.v1.core.encoder_cache_manager import (EncoderCacheManager,
                                                compute_encoder_budget)
from vllm.v1.core.kv_cache_manager import KVCacheManager
from vllm.v1.core.sched.interface import SchedulerInterface
from vllm.v1.core.sched.output import (CachedRequestData, NewRequestData,
                                        SchedulerOutput)
from vllm.v1.core.sched.utils import check_stop
from vllm.v1.engine import (EngineCoreEventType, EngineCoreOutput,
                            EngineCoreOutputs)
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.metrics.stats import SchedulerStats
from vllm.v1.outputs import ModelRunnerOutput
from vllm.v1.request import Request, RequestStatus
from vllm.v1.spec_decode.metrics import SpecDecodingStats
from vllm.v1.structured_output import StructuredOutputManager

logger = init_logger(__name__)


class Scheduler(SchedulerInterface):

    def __init__(
        self,
        vllm_config: VllmConfig,
        kv_cache_config: KVCacheConfig,
        structured_output_manager: StructuredOutputManager,
        mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
        include_finished_set: bool = False,
        log_stats: bool = False,
    ) -> None:
        self.vllm_config = vllm_config
        self.scheduler_config = vllm_config.scheduler_config
        self.cache_config = vllm_config.cache_config
        self.lora_config = vllm_config.lora_config
        self.kv_cache_config = kv_cache_config
        self.log_stats = log_stats
        self.structured_output_manager = structured_output_manager

        # include_finished_set controls whether a separate set of finished
        # request ids should be included in the EngineCoreOutputs returned
        # by update_from_output(). This is currently used in the multi-engine
        # case to track request lifetimes efficiently.
        self.include_finished_set = include_finished_set

        # Scheduling constraints.
        self.max_num_running_reqs = self.scheduler_config.max_num_seqs
        self.max_num_scheduled_tokens = \
            self.scheduler_config.max_num_batched_tokens
        self.max_model_len = self.scheduler_config.max_model_len

        # Create KVConnector for the Scheduler. Note that each Worker
        # will have a corresponding KVConnector with Role=WORKER.
        # The KV Connector handles pushing/pulling of remote KVs for P/D
        # and offloading.
        self.connector = None
        if self.vllm_config.kv_transfer_config is not None:
            self.connector = KVConnectorFactory.create_connector_v1(
                config=self.vllm_config, role=KVConnectorRole.SCHEDULER)

        num_gpu_blocks = self.cache_config.num_gpu_blocks
        assert num_gpu_blocks is not None and num_gpu_blocks > 0

        # Create the KV cache manager.
        self.kv_cache_manager = KVCacheManager(
            kv_cache_config=kv_cache_config,
            max_model_len=self.max_model_len,
            enable_caching=self.cache_config.enable_prefix_caching,
            caching_hash_algo=self.cache_config.prefix_caching_hash_algo,
            log_stats=self.log_stats)
        self.block_size = self.cache_config.block_size
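        # Example (illustrative, assuming block-granular allocation): with
        # block_size=16, a request that needs KV slots for 33 tokens occupies
        # ceil(33 / 16) = 3 cache blocks.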
        # req_id -> Request
        self.requests: dict[str, Request] = {}
        # Priority queues for requests.
        self.waiting: deque[Request] = deque()
        self.running: list[Request] = []

        # The request IDs that are finished in between the previous and the
        # current steps. This is used to notify the workers about the finished
        # requests so that they can free the cached states for those requests.
        # This is flushed at the end of each scheduling step.
        self.finished_req_ids: set[str] = set()

        # OPTIMIZATION: Cache the CachedRequestData objects to avoid creating
        # them at each scheduling step.
        # Request id -> deque of CachedRequestData
        self._cached_reqs_data: dict[
            str, deque[CachedRequestData]] = defaultdict(deque)

        # Encoder-related.
        # Calculate encoder cache size if applicable.
        # NOTE: For now we use the same budget for both compute and space.
        # This can be changed when we make the encoder cache support embedding
        # caching across requests.
        encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
            model_config=vllm_config.model_config,
            scheduler_config=vllm_config.scheduler_config,
            mm_registry=mm_registry,
        )

        # NOTE(woosuk): Here, "encoder" includes the vision encoder (and
        # projector if needed). Currently, we assume that the encoder also
        # has the Transformer architecture (e.g., ViT).
        self.max_num_encoder_input_tokens = encoder_compute_budget
        # NOTE: For the models without encoder (e.g., text-only models),
        # the encoder cache will not be initialized because cache size is 0
        # for these models.
        self.encoder_cache_manager = EncoderCacheManager(
            cache_size=encoder_cache_size)

        self.num_lookahead_tokens = 0
        speculative_config = vllm_config.speculative_config
        if speculative_config and speculative_config.method == "eagle":
            self.num_lookahead_tokens = \
                speculative_config.num_speculative_tokens

    def schedule(self) -> SchedulerOutput:
        # NOTE(woosuk) on the scheduling algorithm:
        # There's no "decoding phase" nor "prefill phase" in the scheduler.
        # Each request just has the num_computed_tokens and
        # num_tokens_with_spec. num_tokens_with_spec =
        # len(prompt_token_ids) + len(output_token_ids) + len(spec_token_ids).
        # At each step, the scheduler tries to assign tokens to the requests
        # so that each request's num_computed_tokens can catch up with its
        # num_tokens_with_spec. This is general enough to cover
        # chunked prefills, prefix caching, speculative decoding,
        # and the "jump decoding" optimization in the future.
        scheduled_new_reqs: list[Request] = []
        scheduled_resumed_reqs: list[Request] = []
        scheduled_running_reqs: list[Request] = []
        preempted_reqs: list[Request] = []

        # NOTE: structured_output_request_ids maps the request_id of a
        # request that uses structured output to its index among the
        # running requests. This helps us determine how to slice the grammar
        # bitmask and apply a valid mask only to requests that use
        # structured decoding.
        structured_output_request_ids: dict[str, int] = {}

        req_to_new_block_ids: dict[str, list[int]] = {}
        num_scheduled_tokens: dict[str, int] = {}
        token_budget = self.max_num_scheduled_tokens
        # Encoder-related.
        scheduled_encoder_inputs: dict[str, list[int]] = {}
        encoder_budget = self.max_num_encoder_input_tokens
        # Spec decode-related.
        scheduled_spec_decode_tokens: dict[str, list[int]] = {}

        # For logging.
        scheduled_timestamp = time.monotonic()
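        # Example (illustrative): with max_num_batched_tokens=512, a request
        # with 1000 prompt tokens and num_computed_tokens=0 gets at most 512
        # tokens scheduled in this step; the remaining 488 are scheduled in
        # later steps once the earlier chunks have been computed.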
        # First, schedule the RUNNING requests.
        req_index = 0
        while req_index < len(self.running) and token_budget > 0:
            request = self.running[req_index]

            num_new_tokens = (request.num_tokens_with_spec -
                              request.num_computed_tokens)
            if (0 < self.scheduler_config.long_prefill_token_threshold <
                    num_new_tokens):
                num_new_tokens = (
                    self.scheduler_config.long_prefill_token_threshold)
            num_new_tokens = min(num_new_tokens, token_budget)

            # Make sure the input position does not exceed the max model len.
            # This is necessary when using spec decoding.
            num_new_tokens = min(
                num_new_tokens,
                self.max_model_len - request.num_computed_tokens)

            # Schedule encoder inputs.
            encoder_inputs_to_schedule = None
            new_encoder_budget = encoder_budget
            if request.has_encoder_inputs:
                (encoder_inputs_to_schedule, num_new_tokens,
                 new_encoder_budget) = self._try_schedule_encoder_inputs(
                     request, request.num_computed_tokens, num_new_tokens,
                     encoder_budget)

            if num_new_tokens == 0:
                # The request cannot be scheduled for one of the following
                # reasons:
                # 1. No new tokens to schedule. This may happen when PP>1 and
                #    we have already scheduled all prompt tokens but they are
                #    not finished yet.
                # 2. The encoder budget is exhausted.
                # 3. The encoder cache is exhausted.
                # NOTE(woosuk): Here, by doing `continue` instead of `break`,
                # we do not strictly follow the FCFS scheduling policy and
                # allow the lower-priority requests to be scheduled.
                req_index += 1
                continue

            while True:
                new_blocks = self.kv_cache_manager.allocate_slots(
                    request,
                    num_new_tokens,
                    num_lookahead_tokens=self.num_lookahead_tokens)
                if new_blocks is None:
                    # The request cannot be scheduled.
                    # Preempt the lowest-priority request.
                    preempted_req = self.running.pop()
                    self.kv_cache_manager.free(preempted_req)
                    preempted_req.status = RequestStatus.PREEMPTED
                    preempted_req.num_computed_tokens = 0
                    if self.log_stats:
                        preempted_req.record_event(
                            EngineCoreEventType.PREEMPTED,
                            scheduled_timestamp)
                    self.waiting.appendleft(preempted_req)
                    preempted_reqs.append(preempted_req)
                    if preempted_req == request:
                        # No more requests to preempt.
                        can_schedule = False
                        break
                else:
                    # The request can be scheduled.
                    can_schedule = True
                    break
            if not can_schedule:
                break
            assert new_blocks is not None

            # Schedule the request.
            scheduled_running_reqs.append(request)
            if request.use_structured_output:
                # PERF: in case of chunked prefill,
                # the request might not include any new tokens.
                # Therefore, we might introduce some additional
                # cycles to fill in the bitmask, which could be a big no-op.
                structured_output_request_ids[request.request_id] = req_index
            req_to_new_block_ids[request.request_id] = [
                b.block_id for b in new_blocks
            ]
            num_scheduled_tokens[request.request_id] = num_new_tokens
            token_budget -= num_new_tokens
            req_index += 1

            # Speculative decode related.
            if request.spec_token_ids:
                num_scheduled_spec_tokens = (num_new_tokens +
                                             request.num_computed_tokens -
                                             request.num_tokens)
                if num_scheduled_spec_tokens > 0:
                    # Trim spec_token_ids list to num_scheduled_spec_tokens.
                    del request.spec_token_ids[num_scheduled_spec_tokens:]
                    scheduled_spec_decode_tokens[request.request_id] = (
                        request.spec_token_ids)

            # Encoder-related.
            if encoder_inputs_to_schedule:
                scheduled_encoder_inputs[request.request_id] = (
                    encoder_inputs_to_schedule)
                # Allocate the encoder cache.
                for i in encoder_inputs_to_schedule:
                    self.encoder_cache_manager.allocate(request, i)
                encoder_budget = new_encoder_budget

        # Record the LoRAs in scheduled_running_reqs
        scheduled_loras: set[int] = set()
        if self.lora_config:
            scheduled_loras = set(
                req.lora_request.lora_int_id for req in scheduled_running_reqs
                if req.lora_request and req.lora_request.lora_int_id > 0)
            assert len(scheduled_loras) <= self.lora_config.max_loras

        # Use a temporary deque to collect requests that need to be skipped
        # and put back at the head of the waiting queue later
        skipped_waiting_requests: deque[Request] = deque()
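        # Example (illustrative): with max_loras=2 and running requests
        # already holding LoRA ids {1, 2}, a waiting request that needs LoRA
        # id 3 is skipped this step and put back at the head of the waiting
        # queue, while a waiting request that reuses LoRA id 1 can still be
        # scheduled.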
        # Next, schedule the WAITING requests.
        if not preempted_reqs:
            while self.waiting and token_budget > 0:
                if len(self.running) == self.max_num_running_reqs:
                    break

                request = self.waiting[0]

                # Skip the request if the structured output request is still
                # waiting for FSM compilation.
                if request.status == RequestStatus.WAITING_FOR_FSM:
                    structured_output_req = request.structured_output_request
                    if structured_output_req and structured_output_req.grammar:
                        request.status = RequestStatus.WAITING
                    else:
                        self.waiting.popleft()
                        skipped_waiting_requests.appendleft(request)
                        continue

                # Check that adding the request still respects the max_loras
                # constraint.
                if self.lora_config and request.lora_request and (
                        len(scheduled_loras) == self.lora_config.max_loras and
                        request.lora_request.lora_int_id
                        not in scheduled_loras):
                    # Scheduling would exceed max_loras, skip.
                    self.waiting.popleft()
                    skipped_waiting_requests.appendleft(request)
                    continue

                # Get already-cached tokens.
                computed_blocks, num_computed_tokens = \
                    self.kv_cache_manager.get_computed_blocks(request)

                # Get externally-cached tokens if using a KVConnector.
                num_external_tokens = (
                    0 if self.connector is None else
                    self.connector.get_num_new_matched_tokens(
                        request, num_computed_tokens))

                # Total computed tokens (local + external).
                num_computed_tokens += num_external_tokens

                # Number of tokens to be scheduled.
                # We use `request.num_tokens` instead of
                # `request.num_prompt_tokens` to consider the resumed
                # requests, which have output tokens.
                num_new_tokens = request.num_tokens - num_computed_tokens
                if (0 < self.scheduler_config.long_prefill_token_threshold <
                        num_new_tokens):
                    num_new_tokens = (
                        self.scheduler_config.long_prefill_token_threshold)
                num_new_tokens = min(num_new_tokens, token_budget)
                assert num_new_tokens > 0

                # Schedule encoder inputs.
                if request.has_encoder_inputs:
                    (encoder_inputs_to_schedule, num_new_tokens,
                     new_encoder_budget
                     ) = self._try_schedule_encoder_inputs(
                         request, num_computed_tokens, num_new_tokens,
                         encoder_budget)
                    if num_new_tokens == 0:
                        # The request cannot be scheduled.
                        break
                else:
                    encoder_inputs_to_schedule = None
                    new_encoder_budget = encoder_budget

                new_blocks = self.kv_cache_manager.allocate_slots(
                    request,
                    num_new_tokens + num_external_tokens,
                    computed_blocks,
                    num_lookahead_tokens=self.num_lookahead_tokens,
                )
                if new_blocks is None:
                    # The request cannot be scheduled.
                    break

                # KVConnector: update internal state after allocation.
                # This information is used to determine if a load is
                # needed for this request.
                if self.connector is not None:
                    self.connector.update_state_after_alloc(
                        request,
                        num_external_tokens,
                    )

                self.waiting.popleft()
                if request.use_structured_output:
                    structured_output_request_ids[
                        request.request_id] = req_index
                req_index += 1
                self.running.append(request)
                if self.log_stats:
                    request.record_event(EngineCoreEventType.SCHEDULED,
                                         scheduled_timestamp)
                if request.status == RequestStatus.WAITING:
                    scheduled_new_reqs.append(request)
                elif request.status == RequestStatus.PREEMPTED:
                    scheduled_resumed_reqs.append(request)
                else:
                    raise RuntimeError(
                        f"Invalid request status: {request.status}")

                if self.lora_config and request.lora_request:
                    scheduled_loras.add(request.lora_request.lora_int_id)
                req_to_new_block_ids[request.request_id] = [
                    b.block_id for b in computed_blocks + new_blocks
                ]
                num_scheduled_tokens[request.request_id] = num_new_tokens
                token_budget -= num_new_tokens
                request.status = RequestStatus.RUNNING
                request.num_computed_tokens = num_computed_tokens

                # Encoder-related.
                if encoder_inputs_to_schedule:
                    scheduled_encoder_inputs[request.request_id] = (
                        encoder_inputs_to_schedule)
                    # Allocate the encoder cache.
                    for i in encoder_inputs_to_schedule:
                        self.encoder_cache_manager.allocate(request, i)
                    encoder_budget = new_encoder_budget
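        # Example (illustrative): a preempted request with 50 prompt tokens
        # and 5 already-generated output tokens has request.num_tokens = 55.
        # If prefix caching recovers 48 of those tokens (3 full blocks with
        # block_size=16), only num_new_tokens = 55 - 48 = 7 tokens need to be
        # scheduled to resume it.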
        # Put back any skipped requests at the head of the waiting queue.
        if skipped_waiting_requests:
            self.waiting.extendleft(skipped_waiting_requests)

        # Check if the scheduling constraints are satisfied.
        total_num_scheduled_tokens = sum(num_scheduled_tokens.values())
        assert total_num_scheduled_tokens <= self.max_num_scheduled_tokens
        assert token_budget >= 0
        assert len(self.running) <= self.max_num_running_reqs
        # Since some requests in the RUNNING queue may not be scheduled in
        # this step, the total number of scheduled requests can be smaller
        # than len(self.running).
        assert (len(scheduled_new_reqs) + len(scheduled_resumed_reqs) +
                len(scheduled_running_reqs) <= len(self.running))

        # Get the longest common prefix among all requests in the running
        # queue. This can potentially be used for cascade attention.
        num_common_prefix_blocks = 0
        if self.running:
            any_request = self.running[0]
            num_common_prefix_blocks = (
                self.kv_cache_manager.get_num_common_prefix_blocks(
                    any_request, len(self.running)))
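        # Example (illustrative): if every running request shares the same
        # 32-token system prompt and block_size=16, they share 2 full prefix
        # blocks, so num_common_prefix_blocks would be 2.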
        grammar_bitmask = self.structured_output_manager.grammar_bitmask(
            self.requests,
            structured_output_request_ids,
            len(self.running),
        )

        # Construct the scheduler output.
        new_reqs_data = [
            NewRequestData.from_request(req,
                                        req_to_new_block_ids[req.request_id])
            for req in scheduled_new_reqs
        ]
        resumed_reqs_data = [
            self._make_cached_request_data(
                req,
                num_scheduled_tokens[req.request_id],
                len(scheduled_spec_decode_tokens.get(req.request_id, ())),
                req_to_new_block_ids[req.request_id],
                resumed_from_preemption=True,
            ) for req in scheduled_resumed_reqs
        ]
        running_reqs_data = [
            self._make_cached_request_data(
                req,
                num_scheduled_tokens[req.request_id],
                len(scheduled_spec_decode_tokens.get(req.request_id, ())),
                req_to_new_block_ids[req.request_id],
                resumed_from_preemption=False,
            ) for req in scheduled_running_reqs
        ]
        scheduler_output = SchedulerOutput(
            scheduled_new_reqs=new_reqs_data,
            scheduled_cached_reqs=resumed_reqs_data + running_reqs_data,
            num_scheduled_tokens=num_scheduled_tokens,
            total_num_scheduled_tokens=total_num_scheduled_tokens,
            scheduled_spec_decode_tokens=scheduled_spec_decode_tokens,
            scheduled_encoder_inputs=scheduled_encoder_inputs,
            num_common_prefix_blocks=num_common_prefix_blocks,
            # finished_req_ids is an existing state in the scheduler,
            # instead of being newly scheduled in this step.
            # It contains the request IDs that are finished in between
            # the previous and the current steps.
            finished_req_ids=self.finished_req_ids,
            free_encoder_input_ids=self.encoder_cache_manager.get_freed_ids(),
            structured_output_request_ids=structured_output_request_ids,
            grammar_bitmask=grammar_bitmask,
        )

        # NOTE(Kuntai): this function is designed for multiple purposes:
        # 1. Plan the KV cache store
        # 2. Wrap up all the KV cache load / save ops into an opaque object
        # 3. Clear the internal states of the connector
        if self.connector is not None:
            meta = self.connector.build_connector_meta(scheduler_output)
            scheduler_output.kv_connector_metadata = meta

        # Advance the number of computed tokens for the request AFTER
        # the request is scheduled.
        # 1. The scheduler_output of the current step has to include the
        #    original number of scheduled tokens to determine input IDs.
        # 2. Advancing the number of computed tokens here allows us to
        #    schedule the prefill request again immediately in the next
        #    scheduling step.
        # 3. If some tokens (e.g. spec tokens) are rejected later, the number
        #    of computed tokens will be adjusted in update_from_output.
        for req_id, num_scheduled_token in num_scheduled_tokens.items():
            self.requests[req_id].num_computed_tokens += num_scheduled_token

        self.finished_req_ids = set()
        return scheduler_output

    def _make_cached_request_data(
        self,
        request: Request,
        num_scheduled_tokens: int,
        num_scheduled_spec_tokens: int,
        new_block_ids: list[int],
        resumed_from_preemption: bool,
    ) -> CachedRequestData:
        # OPTIMIZATION: Cache the CachedRequestData objects to avoid creating
        # them at each scheduling step.
        num_computed_tokens = request.num_computed_tokens
        num_regular_tokens = num_scheduled_tokens - num_scheduled_spec_tokens
        new_token_ids = request.all_token_ids[
            num_computed_tokens:num_computed_tokens + num_regular_tokens]
        req_data_queue = self._cached_reqs_data.get(request.request_id)
        if req_data_queue:
            req_data = req_data_queue.popleft()
            req_data.resumed_from_preemption = resumed_from_preemption
            req_data.new_token_ids = new_token_ids
            req_data.new_block_ids = new_block_ids
            req_data.num_computed_tokens = num_computed_tokens
        else:
            # No cached request data, or all cached request data has been
            # used by the scheduled requests.
            req_data = CachedRequestData.from_request(request,
                                                      resumed_from_preemption,
                                                      new_token_ids,
                                                      new_block_ids)
        return req_data
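    # Illustrative note on the CachedRequestData recycling above: each object
    # handed out here is appended back to self._cached_reqs_data in
    # update_from_output(), so the next schedule() call pops it from the
    # per-request deque and mutates it in place instead of allocating a new
    # object every step.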
    def _try_schedule_encoder_inputs(
        self,
        request: Request,
        num_computed_tokens: int,
        num_new_tokens: int,
        encoder_budget: int,
    ) -> tuple[list[int], int, int]:
        """
        Determine which encoder inputs need to be scheduled in the current
        step, and update `num_new_tokens` and the encoder token budget
        accordingly.

        An encoder input will be scheduled if:
        - Its output tokens overlap with the range of tokens being computed
          in this step, i.e.,
          [num_computed_tokens, num_computed_tokens + num_new_tokens).
        - It is not already computed and stored in the encoder cache.
        - There is sufficient encoder token budget to process it.
        - The encoder cache has space to store it.

        If an encoder input cannot be scheduled due to cache or budget
        limitations, the method adjusts `num_new_tokens` to schedule only the
        decoder tokens up to just before the unschedulable encoder input.

        Note that num_computed_tokens includes both locally cached
        blocks and externally cached blocks (via KVConnector).
        """
        if num_new_tokens == 0 or not request.has_encoder_inputs:
            return [], num_new_tokens, encoder_budget

        encoder_inputs_to_schedule: list[int] = []
        mm_positions = request.mm_positions
        assert mm_positions is not None
        assert len(mm_positions) > 0
        for i, pos_info in enumerate(mm_positions):
            start_pos = pos_info.offset
            num_encoder_tokens = pos_info.length

            # The encoder output is needed if the two ranges overlap:
            # [num_computed_tokens, num_computed_tokens + num_new_tokens) and
            # [start_pos, start_pos + num_encoder_tokens)
            if start_pos >= num_computed_tokens + num_new_tokens:
                # The encoder input is not needed in this step.
                break
            if start_pos + num_encoder_tokens <= num_computed_tokens:
                # The encoder input is already computed and stored
                # in the decoder's KV cache.
                continue

            if self.encoder_cache_manager.has_cache(request, i):
                # The encoder input is already computed and cached.
                continue

            # If no encoder input chunking is allowed, we do not want to
            # partially schedule a multimodal item. If the scheduled range
            # would only cover part of the mm input, roll back to before the
            # mm item.
            if (self.scheduler_config.disable_chunked_mm_input
                    and num_computed_tokens < start_pos
                    and (num_computed_tokens + num_new_tokens) <
                    (start_pos + num_encoder_tokens)):
                num_new_tokens = start_pos - num_computed_tokens
                break

            if (not self.encoder_cache_manager.can_allocate(request, i)
                    or num_encoder_tokens > encoder_budget):
                # The encoder cache is full or the encoder budget is
                # exhausted.
                # NOTE(woosuk): We assume that the encoder input tokens should
                # be processed altogether, as the encoder usually uses
                # bidirectional attention.
                if num_computed_tokens < start_pos:
                    # We only schedule the decoder tokens just before the
                    # encoder input.
                    num_new_tokens = start_pos - num_computed_tokens
                else:
                    # Because of prefix caching, num_computed_tokens is
                    # greater than start_pos even though its encoder input is
                    # not available. In this case, we can't schedule any
                    # token for the request in this step.
                    num_new_tokens = 0
                break

            encoder_budget -= num_encoder_tokens
            encoder_inputs_to_schedule.append(i)
        return encoder_inputs_to_schedule, num_new_tokens, encoder_budget

    def update_from_output(
        self,
        scheduler_output: SchedulerOutput,
        model_runner_output: ModelRunnerOutput,
    ) -> EngineCoreOutputs:
        sampled_token_ids = model_runner_output.sampled_token_ids
        spec_token_ids = model_runner_output.spec_token_ids
        logprobs = model_runner_output.logprobs
        prompt_logprobs_dict = model_runner_output.prompt_logprobs_dict
        num_scheduled_tokens = scheduler_output.num_scheduled_tokens

        new_running: list[Request] = []
        outputs: list[EngineCoreOutput] = []
        spec_decoding_stats: Optional[SpecDecodingStats] = None

        # NOTE(woosuk): As len(self.running) can be up to 1K or more, the
        # below loop can be a performance bottleneck. We should do our best
        # to avoid expensive operations inside the loop.
        for request in self.running:
            req_id = request.request_id
            num_tokens_scheduled = num_scheduled_tokens.get(req_id, 0)
            if num_tokens_scheduled == 0:
                # The request was not scheduled in this step.
                new_running.append(request)
                continue

            req_index = model_runner_output.req_id_to_index[req_id]
            generated_token_ids = sampled_token_ids[req_index]

            scheduled_spec_token_ids = (
                scheduler_output.scheduled_spec_decode_tokens.get(req_id))
            if scheduled_spec_token_ids:
                # num_computed_tokens represents the number of tokens
                # processed in the current step, considering scheduled
                # tokens and rejections. If some tokens are rejected,
                # num_computed_tokens is decreased by the number of rejected
                # tokens, which is given by:
                # len(scheduled_spec_token_ids) + 1
                #     - len(generated_token_ids).
                num_tokens_rejected = (len(scheduled_spec_token_ids) + 1 -
                                       len(generated_token_ids))
                request.num_computed_tokens -= num_tokens_rejected

                spec_decoding_stats = self.make_spec_decoding_stats(
                    spec_decoding_stats,
                    num_draft_tokens=len(scheduled_spec_token_ids),
                    num_accepted_tokens=len(generated_token_ids) - 1)
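            # Example (illustrative): with 3 scheduled spec tokens, the model
            # runner verifies the 3 draft tokens and samples 1 bonus token.
            # If it returns 2 generated tokens, then 3 + 1 - 2 = 2 tokens
            # were rejected, num_computed_tokens is rolled back by 2, and
            # num_accepted_tokens = 2 - 1 = 1.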
            cached_encoder_input_ids = (
                self.encoder_cache_manager.get_cached_input_ids(request))
            # OPTIMIZATION: Avoid list(set) if the set is empty.
            if cached_encoder_input_ids:
                for input_id in list(cached_encoder_input_ids):
                    mm_positions = request.mm_positions[input_id]
                    start_pos = mm_positions.offset
                    num_tokens = mm_positions.length
                    if start_pos + num_tokens <= request.num_computed_tokens:
                        # The encoder output is already processed and stored
                        # in the decoder's KV cache.
                        self.encoder_cache_manager.free_encoder_input(
                            request, input_id)

            # Add newly generated spec token ids to the request.
            if spec_token_ids is not None:
                request.spec_token_ids = spec_token_ids[req_index]

            stopped = False
            new_logprobs = None
            new_token_ids = generated_token_ids

            # Append generated tokens and check for stop. Note that if
            # a request is still being prefilled, we expect the model runner
            # to return empty token ids for the request.
            for num_new, output_token_id in enumerate(new_token_ids, 1):
                request.append_output_token_ids(output_token_id)

                # Check for stop and update request state.
                # This must be called before we make the EngineCoreOutput.
                stopped = check_stop(request, self.max_model_len)
                if stopped:
                    self._free_request(request)
                    del new_token_ids[num_new:]  # Trim new tokens if needed.
                    break

            # Extract sample logprobs if needed.
            if request.sampling_params.logprobs is not None and logprobs:
                # NOTE: once we support N tokens per step (spec decode),
                # the outer lists can be of length > 1.
                new_logprobs = logprobs.slice(req_index, req_index + 1)

            if new_token_ids and request.use_structured_output:
                # NOTE: structured_output_request should not be None if
                # use_structured_output is set; we have checked this above,
                # so it is safe to ignore the type warning here.
                request.structured_output_request.grammar.accept_tokens(  # type: ignore[union-attr]
                    req_id, new_token_ids)

            # Get prompt logprobs for this request.
            prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id)
            if new_token_ids:
                # Add EngineCoreOutput for this Request.
                outputs.append(
                    EngineCoreOutput(
                        request_id=req_id,
                        new_token_ids=new_token_ids,
                        finish_reason=request.get_finished_reason(),
                        new_logprobs=new_logprobs,
                        new_prompt_logprobs_tensors=prompt_logprobs_tensors,
                        stop_reason=request.stop_reason,
                        events=request.take_events()))
            else:
                # Invariant: EngineCore returns no partial prefill outputs.
                assert not prompt_logprobs_tensors

            if not stopped:
                new_running.append(request)

        # Return the cached request data to the queue so they can be reused.
        for req_data in scheduler_output.scheduled_cached_reqs:
            self._cached_reqs_data[req_data.req_id].append(req_data)

        self.running = new_running
        engine_core_outputs = EngineCoreOutputs(
            outputs=outputs,
            scheduler_stats=self.make_stats(spec_decoding_stats),
        )
        if self.include_finished_set:
            # TODO: currently sending duplicates here, improve this.
            engine_core_outputs.finished_requests = (
                scheduler_output.finished_req_ids | self.finished_req_ids)

        return engine_core_outputs

    def add_request(self, request: Request) -> None:
        self.waiting.append(request)
        self.requests[request.request_id] = request
        if self.log_stats:
            request.record_event(EngineCoreEventType.QUEUED)
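    # Illustrative lifecycle sketch (a summary of the methods in this class,
    # not an exhaustive state machine): add_request() queues a request as
    # WAITING; schedule() moves it to RUNNING and allocates KV cache blocks;
    # update_from_output() appends sampled tokens and may finish it; finished
    # or externally aborted requests are released via _free_request(), which
    # records their ids in finished_req_ids for the next SchedulerOutput.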
    def finish_requests(
        self,
        request_ids: Union[str, Iterable[str]],
        finished_status: RequestStatus,
    ) -> None:
        """Handles the finish signal from outside the scheduler.

        For example, the API server can abort a request when the client
        disconnects.
        """
        assert RequestStatus.is_finished(finished_status)
        if isinstance(request_ids, str):
            request_ids = (request_ids, )
        else:
            request_ids = set(request_ids)

        for req_id in request_ids:
            request = self.requests.get(req_id)
            if request is None:
                # Invalid request ID.
                continue

            if request.status == RequestStatus.RUNNING:
                self.running.remove(request)
            else:
                self.waiting.remove(request)
            request.status = finished_status
            self._free_request(request)

    def _free_request(self, request: Request) -> None:
        assert request.is_finished()
        self.kv_cache_manager.free(request)
        self.kv_cache_manager.free_block_hashes(request)
        self.encoder_cache_manager.free(request)
        self._cached_reqs_data.pop(request.request_id, None)
        del self.requests[request.request_id]
        self.finished_req_ids.add(request.request_id)

    def get_num_unfinished_requests(self) -> int:
        return len(self.waiting) + len(self.running)

    def has_finished_requests(self) -> bool:
        return len(self.finished_req_ids) > 0

    def reset_prefix_cache(self) -> bool:
        return self.kv_cache_manager.reset_prefix_cache()

    def make_stats(
        self,
        spec_decoding_stats: Optional[SpecDecodingStats] = None,
    ) -> Optional[SchedulerStats]:
        if not self.log_stats:
            return None
        prefix_cache_stats = self.kv_cache_manager.make_prefix_cache_stats()
        assert prefix_cache_stats is not None
        return SchedulerStats(
            num_running_reqs=len(self.running),
            num_waiting_reqs=len(self.waiting),
            gpu_cache_usage=self.kv_cache_manager.usage,
            prefix_cache_stats=prefix_cache_stats,
            spec_decoding_stats=spec_decoding_stats,
        )

    def make_spec_decoding_stats(
        self,
        spec_decoding_stats: Optional[SpecDecodingStats],
        num_draft_tokens: int,
        num_accepted_tokens: int,
    ) -> Optional[SpecDecodingStats]:
        if not self.log_stats:
            return None
        if spec_decoding_stats is None:
            spec_decoding_stats = SpecDecodingStats()
        spec_decoding_stats.observe(num_draft_tokens=num_draft_tokens,
                                    num_accepted_tokens=num_accepted_tokens)
        return spec_decoding_stats
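
# A minimal usage sketch (illustrative; the real engine core wires the
# scheduler to a model runner and executor, and the `model_runner` name and
# its execute_model() call below are assumptions, not part of this module):
#
#     scheduler = Scheduler(vllm_config, kv_cache_config,
#                           structured_output_manager)
#     scheduler.add_request(request)
#     while scheduler.get_num_unfinished_requests() > 0:
#         scheduler_output = scheduler.schedule()
#         model_runner_output = model_runner.execute_model(scheduler_output)
#         engine_core_outputs = scheduler.update_from_output(
#             scheduler_output, model_runner_output)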