From 26a465584ae14a04a1f6e9b36621d70e9d907c10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Mon, 24 Nov 2025 18:18:04 +0100 Subject: [PATCH 01/43] [NIXL] Use config to enable telemetry + NIXL version bump (#29305) Signed-off-by: NickLucche --- requirements/kv_connectors.txt | 2 +- .../kv_transfer/kv_connector/v1/nixl_connector.py | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/requirements/kv_connectors.txt b/requirements/kv_connectors.txt index b1f3269cd3813..083230c171096 100644 --- a/requirements/kv_connectors.txt +++ b/requirements/kv_connectors.txt @@ -1,2 +1,2 @@ lmcache -nixl >= 0.6.0 # Required for disaggregated prefill +nixl >= 0.7.1 # Required for disaggregated prefill diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 7c0911240493c..493938d4aad92 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -4,7 +4,6 @@ import contextlib import copy import logging import math -import os import queue import threading import time @@ -810,9 +809,6 @@ class NixlConnectorWorker: self.nixl_backends = vllm_config.kv_transfer_config.get_from_extra_config( "backends", ["UCX"] ) - # TODO temporary, once nixl allows for telemetry flag in config - # (next release), we can remove this env var. - os.environ["NIXL_TELEMETRY_ENABLE"] = "1" # Agent. non_ucx_backends = [b for b in self.nixl_backends if b != "UCX"] @@ -828,10 +824,11 @@ class NixlConnectorWorker: if nixl_agent_config is None: config = None else: + # Enable telemetry by default for NIXL 0.7.1 and above. config = ( - nixl_agent_config(backends=self.nixl_backends) + nixl_agent_config(backends=self.nixl_backends, capture_telemetry=True) if len(non_ucx_backends) > 0 - else nixl_agent_config(num_threads=num_threads) + else nixl_agent_config(num_threads=num_threads, capture_telemetry=True) ) self.nixl_wrapper = NixlWrapper(str(uuid.uuid4()), config) From cc313cb73d75cd5ac2715fc45bfadb89888cf8cd Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 24 Nov 2025 09:32:27 -0800 Subject: [PATCH 02/43] [Model Runner V2] Implement Single-step Eagle 1 (#29300) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu/input_batch.py | 3 + vllm/v1/worker/gpu/model_runner.py | 79 +++++++++ vllm/v1/worker/gpu/sampler.py | 5 +- vllm/v1/worker/gpu/spec_decode/__init__.py | 18 ++ vllm/v1/worker/gpu/spec_decode/eagle.py | 197 +++++++++++++++++++++ 5 files changed, 300 insertions(+), 2 deletions(-) create mode 100644 vllm/v1/worker/gpu/spec_decode/eagle.py diff --git a/vllm/v1/worker/gpu/input_batch.py b/vllm/v1/worker/gpu/input_batch.py index 7675cb45170b5..1177d25e300cf 100644 --- a/vllm/v1/worker/gpu/input_batch.py +++ b/vllm/v1/worker/gpu/input_batch.py @@ -37,6 +37,9 @@ class InputBuffers: self.seq_lens = torch.zeros(max_num_reqs, dtype=torch.int32, device=device) self.cu_num_logits = self._make_buffer(max_num_reqs + 1, dtype=torch.int32) + # Spec decoding. + self.next_prefill_tokens = self._make_buffer(max_num_reqs, dtype=torch.int32) + # Structured outputs. 
self.bitmask_indices = self._make_buffer(max_num_reqs, dtype=torch.int32) self.grammar_bitmask = self._make_buffer( diff --git a/vllm/v1/worker/gpu/model_runner.py b/vllm/v1/worker/gpu/model_runner.py index 6e332ee4b75b8..205298a415d43 100644 --- a/vllm/v1/worker/gpu/model_runner.py +++ b/vllm/v1/worker/gpu/model_runner.py @@ -45,6 +45,7 @@ from vllm.v1.worker.gpu.input_batch import ( prepare_prefill_inputs, ) from vllm.v1.worker.gpu.sampler import Sampler, compute_prompt_logprobs +from vllm.v1.worker.gpu.spec_decode import init_speculator from vllm.v1.worker.gpu.spec_decode.rejection_sample import rejection_sample from vllm.v1.worker.gpu.states import RequestState, SamplingMetadata from vllm.v1.worker.gpu.structured_outputs import apply_grammar_bitmask @@ -97,16 +98,20 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): if self.use_async_scheduling: self.input_prep_event = torch.cuda.Event() self.structured_outputs_event = torch.cuda.Event() + self.spec_decode_event = torch.cuda.Event() else: self.input_prep_event = None self.structured_outputs_event = None + self.spec_decode_event = None if self.speculative_config is not None: self.do_spec_decode = True self.num_speculative_steps = self.speculative_config.num_speculative_tokens + self.speculator = init_speculator(self.vllm_config, self.device) else: self.do_spec_decode = False self.num_speculative_steps = 0 + self.speculator = None self.req_states = RequestState( max_num_reqs=self.max_num_reqs, @@ -153,6 +158,8 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): self.vllm_config, self.device, ) + if self.do_spec_decode: + self.speculator.load_model(self.model) time_after_load = time.perf_counter() self.model_memory_usage = m.consumed_memory @@ -285,6 +292,33 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): logits = self.model.compute_logits(hidden_states) self.sampler(logits, sampling_metadata) + @torch.inference_mode() + def _dummy_speculator_run( + self, + hidden_states: torch.Tensor, + aux_hidden_states: list[torch.Tensor] | None, + ) -> None: + num_tokens = hidden_states.shape[0] + num_reqs = min(num_tokens, self.max_num_reqs) + input_batch = InputBatch.make_dummy( + num_reqs=num_reqs, + num_tokens=num_tokens, + input_buffers=self.input_buffers, + device=self.device, + ) + sampling_metadata = SamplingMetadata.make_dummy( + num_reqs=num_reqs, + device=self.device, + ) + num_sampled = torch.ones(num_reqs, dtype=torch.int32, device=self.device) + self.propose_draft( + input_batch=input_batch, + sampling_metadata=sampling_metadata, + last_hidden_states=hidden_states, + aux_hidden_states=aux_hidden_states, + num_sampled=num_sampled, + ) + @torch.inference_mode() def profile_run(self) -> None: hidden_states, sample_hidden_states = self._dummy_run( @@ -292,6 +326,8 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): skip_attn=True, ) self._dummy_sampler_run(sample_hidden_states) + if self.do_spec_decode: + self._dummy_speculator_run(hidden_states, None) torch.cuda.synchronize() del hidden_states, sample_hidden_states gc.collect() @@ -727,6 +763,41 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): self.req_states.prefill_len.np[idx_mapping_np], ) + @torch.inference_mode() + def propose_draft( + self, + input_batch: InputBatch, + sampling_metadata: SamplingMetadata, + last_hidden_states: torch.Tensor, + aux_hidden_states: list[torch.Tensor] | None, + num_sampled: torch.Tensor, + ) -> torch.Tensor: + num_reqs = 
input_batch.num_reqs + idx_mapping_np = input_batch.idx_mapping_np + with async_barrier(self.spec_decode_event): + self.input_buffers.next_prefill_tokens.np[:num_reqs] = ( + self.req_states.prefill_token_ids[ + idx_mapping_np, + self.req_states.num_computed_prefill_tokens[idx_mapping_np], + ] + ) + next_prefill_tokens = self.input_buffers.next_prefill_tokens.copy_to_gpu( + num_reqs + ) + + assert self.speculator is not None + draft_tokens = self.speculator.propose( + input_batch, + sampling_metadata, + last_hidden_states, + aux_hidden_states, + num_sampled, + self.req_states.last_sampled_tokens, + next_prefill_tokens, + ) + self.req_states.draft_tokens[input_batch.idx_mapping] = draft_tokens + return draft_tokens + def get_cudagraph_and_dp_padding( self, scheduler_output: SchedulerOutput, @@ -913,6 +984,14 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): self.postprocess( input_batch, sampler_output.sampled_token_ids, num_sampled_tokens ) + if self.do_spec_decode: + _ = self.propose_draft( + input_batch, + sampling_metadata, + hidden_states, + None, # aux_hidden_states + num_sampled_tokens, + ) if self.use_async_scheduling: return async_output diff --git a/vllm/v1/worker/gpu/sampler.py b/vllm/v1/worker/gpu/sampler.py index c48ed2d8ca167..d8676079ab951 100644 --- a/vllm/v1/worker/gpu/sampler.py +++ b/vllm/v1/worker/gpu/sampler.py @@ -100,8 +100,9 @@ def _gumbel_sample_kernel( mask=mask, other=float("-inf"), ) + logits = logits.to(tl.float32) - temp = tl.load(temp_ptr + req_idx) + temp = tl.load(temp_ptr + req_idx).to(tl.float32) if temp != 0.0: # Calculate the seed for gumbel noise. seed = tl.load(seeds_ptr + req_idx) @@ -116,7 +117,7 @@ def _gumbel_sample_kernel( # Apply temperature. if APPLY_TEMPERATURE: # NOTE(woosuk): Use div_rn to match the behavior of torch. - logits = tl.div_rn(logits, temp.to(tl.float32)) + logits = tl.div_rn(logits, temp) # Apply gumbel noise. 
logits = tl.where(mask, logits + gumbel_noise, float("-inf")) diff --git a/vllm/v1/worker/gpu/spec_decode/__init__.py b/vllm/v1/worker/gpu/spec_decode/__init__.py index e69de29bb2d1d..15b85204e05ce 100644 --- a/vllm/v1/worker/gpu/spec_decode/__init__.py +++ b/vllm/v1/worker/gpu/spec_decode/__init__.py @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import torch + +from vllm.config import VllmConfig + + +def init_speculator( + vllm_config: VllmConfig, + device: torch.device, +): + speculative_config = vllm_config.speculative_config + assert speculative_config is not None + if speculative_config.use_eagle(): + from vllm.v1.worker.gpu.spec_decode.eagle import EagleSpeculator + + return EagleSpeculator(vllm_config, device) + raise NotImplementedError(f"{speculative_config.method} is not supported yet.") diff --git a/vllm/v1/worker/gpu/spec_decode/eagle.py b/vllm/v1/worker/gpu/spec_decode/eagle.py new file mode 100644 index 0000000000000..0f11903e14540 --- /dev/null +++ b/vllm/v1/worker/gpu/spec_decode/eagle.py @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import torch +import torch.nn as nn + +from vllm.config import VllmConfig +from vllm.config.compilation import CUDAGraphMode +from vllm.forward_context import set_forward_context +from vllm.model_executor.model_loader import get_model +from vllm.triton_utils import tl, triton +from vllm.v1.worker.gpu.input_batch import InputBatch +from vllm.v1.worker.gpu.sampler import gumbel_sample +from vllm.v1.worker.gpu.states import SamplingMetadata + + +class EagleSpeculator: + def __init__(self, vllm_config: VllmConfig, device: torch.device): + self.vllm_config = vllm_config + self.device = device + + self.speculative_config = vllm_config.speculative_config + assert self.speculative_config is not None + self.method = self.speculative_config.method + self.num_speculative_steps = self.speculative_config.num_speculative_tokens + self.draft_model_config = self.speculative_config.draft_model_config + + self.scheduler_config = vllm_config.scheduler_config + self.max_num_reqs = self.scheduler_config.max_num_seqs + self.max_num_tokens = self.scheduler_config.max_num_batched_tokens + + self.input_ids = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device=device + ) + self.positions = torch.zeros( + self.max_num_tokens, dtype=torch.int64, device=device + ) + + def load_model(self, target_model: nn.Module) -> None: + from vllm.compilation.backends import set_model_tag + + with set_model_tag("eagle_head"): + self.model = get_model( + vllm_config=self.vllm_config, model_config=self.draft_model_config + ) + + share_lm_head = True + if share_lm_head and hasattr(target_model, "lm_head"): + if hasattr(self.model, "lm_head"): + del self.model.lm_head + self.model.lm_head = target_model.lm_head + + @torch.inference_mode() + def propose( + self, + input_batch: InputBatch, + sampling_metadata: SamplingMetadata, + # [num_tokens, hidden_size] + last_hidden_states: torch.Tensor, + # num_layers x [num_tokens, hidden_size] + aux_hidden_states: list[torch.Tensor] | None, + # [num_reqs] + num_sampled: torch.Tensor, + # [max_num_reqs, 1] + last_sampled: torch.Tensor, + # [num_reqs] + next_prefill_tokens: torch.Tensor, + ) -> torch.Tensor: + if aux_hidden_states: + assert self.method == "eagle3" + hidden_states = self.model.combine_hidden_states( + torch.cat(aux_hidden_states, dim=-1) + ) + else: + hidden_states = 
last_hidden_states + + # Get the input ids and last token indices for the speculator. + last_token_indices = prepare_eagle_inputs( + self.input_ids, + input_batch, + num_sampled, + last_sampled, + next_prefill_tokens, + ) + input_ids = self.input_ids[: input_batch.num_tokens_after_padding] + + # Prefill: Run the eagle speculator with eager mode. + with set_forward_context( + input_batch.attn_metadata, + self.vllm_config, + num_tokens=input_batch.num_tokens_after_padding, + cudagraph_runtime_mode=CUDAGraphMode.NONE, + ): + ret_hidden_states = self.model( + input_ids=input_ids, + positions=input_batch.positions, + hidden_states=hidden_states, + ) + if self.method == "mtp": + last_hidden_states = ret_hidden_states + hidden_states = ret_hidden_states + else: + last_hidden_states, hidden_states = ret_hidden_states + sample_hidden_states = last_hidden_states[last_token_indices] + logits = self.model.compute_logits(sample_hidden_states) + + num_reqs = input_batch.num_reqs + cu_num_logits = input_batch.cu_num_logits[:num_reqs] + temperature = sampling_metadata.temperature[cu_num_logits] + seed = sampling_metadata.seeds[cu_num_logits] + # NOTE(woosuk): We must add 1 to the positions to match the Gumbel noise + # used for draft and target sampling. + pos = input_batch.positions[last_token_indices] + 1 + draft_tokens = gumbel_sample( + logits, temperature, seed, pos, apply_temperature=True + ) + if self.num_speculative_steps == 1: + # Early exit. + return draft_tokens.view(-1, 1) + raise NotImplementedError("num_speculative_steps > 1 is not supported yet.") + + +@triton.jit +def _prepare_eagle_inputs_kernel( + last_token_indices_ptr, + eagle_input_ids_ptr, + target_input_ids_ptr, + idx_mapping_ptr, + last_sampled_ptr, + next_prefill_tokens_ptr, + num_sampled_ptr, + query_start_loc_ptr, + cu_num_logits_ptr, + BLOCK_SIZE: tl.constexpr, +): + batch_idx = tl.program_id(0) + query_start = tl.load(query_start_loc_ptr + batch_idx) + query_end = tl.load(query_start_loc_ptr + batch_idx + 1) + query_len = query_end - query_start + + # Get the true query length and next token after accounting for rejected tokens. + num_sampled = tl.load(num_sampled_ptr + batch_idx) + if num_sampled > 0: + req_state_idx = tl.load(idx_mapping_ptr + batch_idx) + next_token = tl.load(last_sampled_ptr + req_state_idx).to(tl.int32) + + logits_start = tl.load(cu_num_logits_ptr + batch_idx) + logits_end = tl.load(cu_num_logits_ptr + batch_idx + 1) + num_logits = logits_end - logits_start + + num_rejected = num_logits - num_sampled + query_len -= num_rejected + else: + # Chunked prefilling. + # Get the next prefill token. + next_token = tl.load(next_prefill_tokens_ptr + batch_idx) + + # Shift target_input_ids by one. 
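+    # Each draft position is fed the *next* target token: eagle_input_ids[j]
+    # becomes target_input_ids[j + 1], and the freed last slot is filled with
+    # `next_token` (the newly sampled token, or the next prefill token) below.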
+ for i in range(1, query_len, BLOCK_SIZE): + block = i + tl.arange(0, BLOCK_SIZE) + mask = block < query_len + input_ids = tl.load(target_input_ids_ptr + query_start + block, mask=mask) + tl.store(eagle_input_ids_ptr + query_start + block - 1, input_ids, mask=mask) + + last_token_index = query_start + query_len - 1 + tl.store(last_token_indices_ptr + batch_idx, last_token_index) + tl.store(eagle_input_ids_ptr + last_token_index, next_token) + + +def prepare_eagle_inputs( + eagle_input_ids: torch.Tensor, + input_batch: InputBatch, + # [num_reqs] + num_sampled: torch.Tensor, + # [max_num_reqs, 1] + last_sampled: torch.Tensor, + # [max_num_reqs] + next_prefill_tokens: torch.Tensor, +) -> torch.Tensor: + num_reqs = input_batch.num_reqs + last_token_indices = torch.empty( + num_reqs, + dtype=torch.int64, + device=eagle_input_ids.device, + ) + _prepare_eagle_inputs_kernel[(num_reqs,)]( + last_token_indices, + eagle_input_ids, + input_batch.input_ids, + input_batch.idx_mapping, + last_sampled, + next_prefill_tokens, + num_sampled, + input_batch.query_start_loc, + input_batch.cu_num_logits, + BLOCK_SIZE=1024, + ) + return last_token_indices From cec418b5df3bf032a83b6a6795e8026d39e199bd Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 24 Nov 2025 09:34:37 -0800 Subject: [PATCH 03/43] [Model Runner V2] Change Numba AoT to JIT (#29328) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu/input_batch.py | 71 +++++++----------------------- vllm/v1/worker/gpu/model_runner.py | 24 ++++++---- 2 files changed, 32 insertions(+), 63 deletions(-) diff --git a/vllm/v1/worker/gpu/input_batch.py b/vllm/v1/worker/gpu/input_batch.py index 1177d25e300cf..3ac43ea4952de 100644 --- a/vllm/v1/worker/gpu/input_batch.py +++ b/vllm/v1/worker/gpu/input_batch.py @@ -4,7 +4,6 @@ from dataclasses import dataclass from typing import Any import numba -import numba.types as types import numpy as np import torch @@ -147,80 +146,42 @@ class InputBatch: ) -# NOTE: With the type annotations, this function is pre-compiled -# before the first call. 
-@numba.jit( - [ - types.none( - types.int32[:], # idx_mapping - types.int32[:], # num_scheduled_tokens - types.int32[:, :], # prefill_token_ids - types.int32[:], # num_computed_prefill_tokens - types.int32[:], # prefill_len - types.int32[:], # input_ids - types.int32[:], # query_start_loc - ) - ], - nopython=True, - cache=True, -) +@numba.njit(cache=True) def _prepare_prefill_inputs( - idx_mapping: np.ndarray, # batch_idx -> req_idx - num_scheduled_tokens: np.ndarray, # [B] + idx_mapping: np.ndarray, # [B] + query_lens: np.ndarray, # [B] + query_start_loc: np.ndarray, # [B + 1] prefill_token_ids: np.ndarray, # [N, max_model_len] num_computed_prefill_tokens: np.ndarray, # [N] - prefill_len: np.ndarray, # [N] input_ids: np.ndarray, # [num_input_tokens] - query_start_loc: np.ndarray, # [B + 1] ) -> None: - num_reqs = num_scheduled_tokens.shape[0] - query_start_loc[0] = 0 - - cu_num_tokens = 0 + num_reqs = idx_mapping.shape[0] + query_starts = query_start_loc[:num_reqs] + query_ends = query_start_loc[1 : num_reqs + 1] + starts = num_computed_prefill_tokens[idx_mapping] + ends = starts + query_lens for i in range(num_reqs): - req_idx = idx_mapping[i] - query_len = num_scheduled_tokens[i] - - start = num_computed_prefill_tokens[req_idx] - end = min(start + query_len, prefill_len[req_idx]) - n = end - start - - start_idx = cu_num_tokens - input_ids[start_idx : start_idx + n] = prefill_token_ids[req_idx, start:end] - - cu_num_tokens = start_idx + query_len - query_start_loc[i + 1] = cu_num_tokens - - # Pad the inputs for CUDA graphs. - # Note: pad query_start_loc to be non-decreasing, as kernels - # like FlashAttention requires that - query_start_loc[num_reqs + 1 :].fill(cu_num_tokens) + input_ids[query_starts[i] : query_ends[i]] = prefill_token_ids[ + idx_mapping[i], starts[i] : ends[i] + ] def prepare_prefill_inputs( idx_mapping: np.ndarray, num_scheduled_tokens: np.ndarray, - total_num_tokens: int, + query_start_loc: np.ndarray, prefill_token_ids: np.ndarray, num_computed_prefill_tokens: np.ndarray, - prefill_len: np.ndarray, - input_ids: CpuGpuBuffer, - query_start_loc: CpuGpuBuffer, + input_ids: np.ndarray, ) -> None: _prepare_prefill_inputs( idx_mapping, num_scheduled_tokens, + query_start_loc, prefill_token_ids, num_computed_prefill_tokens, - prefill_len, - input_ids.np, - query_start_loc.np, + input_ids, ) - input_ids.copy_to_gpu(total_num_tokens) - # NOTE(woosuk): We should copy the whole query_start_loc and seq_lens - # tensors from CPU to GPU, because they may include paddings needed - # for full CUDA graph mode. - query_start_loc.copy_to_gpu() @triton.jit diff --git a/vllm/v1/worker/gpu/model_runner.py b/vllm/v1/worker/gpu/model_runner.py index 205298a415d43..e0ed183d3c5b0 100644 --- a/vllm/v1/worker/gpu/model_runner.py +++ b/vllm/v1/worker/gpu/model_runner.py @@ -502,20 +502,28 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): # Block tables: num_kv_cache_groups x [num_reqs, max_num_blocks] block_tables = self.block_tables.gather_block_tables(idx_mapping) - # Copy prefill tokens from CPU to GPU and get query_start_loc. + # Get query_start_loc. + np.cumsum( + num_scheduled_tokens, + out=self.input_buffers.query_start_loc.np[1 : num_reqs + 1], + ) + # Pad for full CUDA graph mode. + # Some attention backends like FA3 require query_start_loc to be non-decreasing. 
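+        # Every padding entry points at num_tokens, so the prefix sums stay
+        # monotonic and the padded (unused) slots decode as zero-length queries.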
+ self.input_buffers.query_start_loc.np[num_reqs + 1 :] = num_tokens + self.input_buffers.query_start_loc.copy_to_gpu() + query_start_loc_gpu = self.input_buffers.query_start_loc.gpu[: num_reqs + 1] + query_start_loc_np = self.input_buffers.query_start_loc.np[: num_reqs + 1] + + # Copy prefill tokens from CPU to GPU. prepare_prefill_inputs( idx_mapping_np, num_scheduled_tokens, - num_tokens, + query_start_loc_np, self.req_states.prefill_token_ids, self.req_states.num_computed_prefill_tokens, - self.req_states.prefill_len.np, - self.input_buffers.input_ids, - self.input_buffers.query_start_loc, + self.input_buffers.input_ids.np, ) - query_start_loc = self.input_buffers.query_start_loc - query_start_loc_gpu = query_start_loc.gpu[: num_reqs + 1] - query_start_loc_np = query_start_loc.np[: num_reqs + 1] + self.input_buffers.input_ids.copy_to_gpu(num_tokens) # Prepare positions and seq_lens. prepare_pos_seq_lens( From 8f066146c395dfadb86914c88d9a0f3173f8fa39 Mon Sep 17 00:00:00 2001 From: bnellnm <49004751+bnellnm@users.noreply.github.com> Date: Mon, 24 Nov 2025 13:38:04 -0500 Subject: [PATCH 04/43] [MoE][Refactor] Make select_experts a non-static method (#29067) Signed-off-by: Bill Nell --- tests/kernels/moe/test_flashinfer.py | 19 +-- tests/test_routing_simulator.py | 35 ++++- .../layers/fused_moe/fused_moe_method_base.py | 6 +- .../fused_moe/fused_moe_modular_method.py | 41 +----- vllm/model_executor/layers/fused_moe/layer.py | 118 +++++++++-------- .../fused_moe/unquantized_fused_moe_method.py | 32 +---- .../layers/quantization/awq_marlin.py | 17 +-- .../layers/quantization/bitsandbytes.py | 20 +-- .../compressed_tensors_moe.py | 123 +++--------------- .../layers/quantization/experts_int8.py | 19 +-- .../model_executor/layers/quantization/fp8.py | 29 +---- .../layers/quantization/gguf.py | 17 +-- .../layers/quantization/gptq_marlin.py | 19 +-- .../layers/quantization/modelopt.py | 45 ++----- .../layers/quantization/moe_wna16.py | 17 +-- .../layers/quantization/mxfp4.py | 23 +--- .../layers/quantization/quark/quark_moe.py | 38 +----- .../model_executor/layers/quantization/rtn.py | 17 +-- 18 files changed, 163 insertions(+), 472 deletions(-) diff --git a/tests/kernels/moe/test_flashinfer.py b/tests/kernels/moe/test_flashinfer.py index 638741e91619b..a6977f222408d 100644 --- a/tests/kernels/moe/test_flashinfer.py +++ b/tests/kernels/moe/test_flashinfer.py @@ -11,7 +11,6 @@ from vllm.model_executor.layers.fused_moe.config import ( fp8_w8a8_moe_quant_config, ) from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts -from vllm.model_executor.layers.fused_moe.layer import FusedMoE from vllm.model_executor.layers.quantization.utils.flashinfer_utils import ( apply_flashinfer_per_tensor_scale_fp8, flashinfer_cutlass_moe_fp8, @@ -151,14 +150,11 @@ def test_flashinfer_per_tensor_moe_fp8_no_graph( td = TestData.make_moe_tensors_8bit(m, k, n, e, reorder=True) score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids = Llama4MoE.custom_routing_function( hidden_states=td.hidden_states, - router_logits=score, - use_grouped_topk=False, - top_k=topk, + gating_output=score, + topk=topk, renormalize=False, - custom_routing_function=Llama4MoE.custom_routing_function, - scoring_func="softmax", ) quant_config = fp8_w8a8_moe_quant_config( @@ -219,14 +215,11 @@ def test_flashinfer_cutlass_moe_fp8_no_graph( ) score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16) - topk_weights, topk_ids, _ = 
FusedMoE.select_experts( + topk_weights, topk_ids = Llama4MoE.custom_routing_function( hidden_states=td.hidden_states, - router_logits=score, - use_grouped_topk=False, - top_k=topk, + gating_output=score, + topk=topk, renormalize=False, - custom_routing_function=Llama4MoE.custom_routing_function, - scoring_func="softmax", ) quant_config = fp8_w8a8_moe_quant_config( diff --git a/tests/test_routing_simulator.py b/tests/test_routing_simulator.py index 5a162fa8f791b..e8826eb441a24 100644 --- a/tests/test_routing_simulator.py +++ b/tests/test_routing_simulator.py @@ -9,9 +9,16 @@ different routing strategies and analyze their performance, including integration tests with FusedMoE layer. """ +import tempfile + import pytest import torch +from vllm.config import VllmConfig, set_current_vllm_config +from vllm.distributed import ( + init_distributed_environment, + initialize_model_parallel, +) from vllm.model_executor.layers.fused_moe.routing_simulator import ( DistributionBasedRouting, RoutingSimulator, @@ -89,6 +96,28 @@ def test_routing_strategy_integration(monkeypatch, device): # Test different routing strategies strategies = RoutingSimulator.get_available_strategies() + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + temp_file = tempfile.mkstemp()[1] + init_distributed_environment( + world_size=1, + rank=0, + local_rank=0, + distributed_init_method=f"file://{temp_file}", + ) + initialize_model_parallel( + tensor_model_parallel_size=1, + pipeline_model_parallel_size=1, + ) + fused_moe = FusedMoE( + num_experts=num_experts, + top_k=top_k, + hidden_size=hidden_size, + intermediate_size=0, + use_grouped_topk=False, + renormalize=True, + ) + for strategy in strategies: # Set environment variable env_name = "VLLM_MOE_ROUTING_SIMULATION_STRATEGY" @@ -98,13 +127,9 @@ def test_routing_strategy_integration(monkeypatch, device): envs.environment_variables[env_name] = lambda s=strategy: s # Test the select_experts method - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = fused_moe.select_experts( hidden_states=hidden_states, router_logits=router_logits, - top_k=top_k, - use_grouped_topk=False, - renormalize=True, - indices_type=torch.long, ) # Verify output shapes diff --git a/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py b/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py index 073e90a4e6808..ef7090c349fc6 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py @@ -90,10 +90,14 @@ class FusedMoEMethodBase(QuantizeMethodBase): def allow_inplace(self) -> bool: return False + @property + def method_name(self) -> str: + return self.__class__.__name__ + @abstractmethod def apply( self, - layer: torch.nn.Module, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 x: torch.Tensor, router_logits: torch.Tensor, top_k: int, diff --git a/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py b/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py index c6dc95acdb636..c23c41df226f0 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py @@ -66,6 +66,10 @@ class FusedMoEModularMethod(FusedMoEMethodBase, CustomOp): def allow_inplace(self) -> bool: return self.old_quant_method.allow_inplace + @property + def method_name(self) -> str: + return self.old_quant_method.method_name + def create_weights( self, layer: 
torch.nn.Module, @@ -84,7 +88,7 @@ class FusedMoEModularMethod(FusedMoEMethodBase, CustomOp): def apply( self, - layer: torch.nn.Module, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -105,42 +109,9 @@ class FusedMoEModularMethod(FusedMoEMethodBase, CustomOp): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - # Is getattr needed? - zero_expert_num = getattr(layer, "zero_expert_num", 0) - zero_expert_type = getattr(layer, "zero_expert_type", None) - - if enable_eplb: - if self.supports_eplb: - assert expert_load_view is not None - assert logical_to_physical_map is not None - assert logical_replica_count is not None - else: - raise NotImplementedError( - "EPLB is not supported for " - f"{self.old_quant_method.__class__.__name__}." - ) - topk_weights, topk_ids, zero_expert_result = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, - enable_eplb=enable_eplb, - expert_map=expert_map, - expert_load_view=expert_load_view, - logical_to_physical_map=logical_to_physical_map, - logical_replica_count=logical_replica_count, - global_num_experts=global_num_experts, - zero_expert_num=zero_expert_num, - zero_expert_type=zero_expert_type, ) result = self.fused_experts( @@ -156,7 +127,7 @@ class FusedMoEModularMethod(FusedMoEMethodBase, CustomOp): expert_map=None if self.disable_expert_map else expert_map, ) - if zero_expert_num != 0 and zero_expert_type is not None: + if layer.zero_expert_num != 0 and layer.zero_expert_type is not None: assert not isinstance(result, tuple), ( "Shared + zero experts are mutually exclusive not yet supported" ) diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 6619b64b2bbc0..0ef3130b26333 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -1510,30 +1510,11 @@ class FusedMoE(CustomOp): logits_shape, dtype=moe.in_dtype, device=torch.cuda.current_device() ) - @staticmethod def select_experts( + self, hidden_states: torch.Tensor, router_logits: torch.Tensor, - top_k: int, - use_grouped_topk: bool, - renormalize: bool, - topk_group: int | None = None, - num_expert_group: int | None = None, - custom_routing_function: Callable | None = None, - scoring_func: str = "softmax", - routed_scaling_factor: float = 1.0, - e_score_correction_bias: torch.Tensor | None = None, - indices_type: torch.dtype | None = None, - enable_eplb: bool = False, - expert_map: torch.Tensor | None = None, - expert_load_view: torch.Tensor | None = None, - logical_to_physical_map: torch.Tensor | None = None, - logical_replica_count: torch.Tensor | None = None, - global_num_experts: int | None = None, - zero_expert_num: int | None = None, - zero_expert_type: str | None = None, - num_fused_shared_experts: int = 0, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: """ Route the input hidden states to the top-k experts based on the router logits. 
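A condensed before/after sketch of the call-site change this refactor makes in each
quantization method's `apply` (argument lists abbreviated; `x`, `router_logits`, and
`layer` are the names used in the hunks below):

    # Before: a static method, with every routing knob threaded through the call.
    topk_weights, topk_ids, _ = FusedMoE.select_experts(
        hidden_states=x,
        router_logits=router_logits,
        top_k=top_k,
        use_grouped_topk=use_grouped_topk,
        renormalize=renormalize,
        # ...roughly a dozen more routing parameters...
    )

    # After: an instance method that reads the same knobs from the layer itself.
    topk_weights, topk_ids, _ = layer.select_experts(
        hidden_states=x,
        router_logits=router_logits,
    )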
@@ -1552,6 +1533,27 @@
             fused_topk_bias,
         )
 
+        if self.enable_eplb:
+            if self.quant_method.supports_eplb:
+                if self.expert_load_view is None:
+                    raise ValueError(
+                        "enable_eplb=True requires expert_load_view != None"
+                    )
+                if self.logical_to_physical_map is None:
+                    raise ValueError(
+                        "enable_eplb=True requires logical_to_physical_map != None"
+                    )
+                if self.logical_replica_count is None:
+                    raise ValueError(
+                        "enable_eplb=True requires logical_replica_count != None"
+                    )
+            else:
+                raise NotImplementedError(
+                    f"EPLB is not supported for {self.quant_method.method_name}."
+                )
+
+        indices_type = self.quant_method.topk_indices_dtype
+
         # Check if we should use a routing simulation strategy
         routing_strategy = envs.VLLM_MOE_ROUTING_SIMULATION_STRATEGY
         if routing_strategy != "":
@@ -1559,20 +1561,20 @@
                 hidden_states=hidden_states,
                 router_logits=router_logits,
                 strategy_name=routing_strategy,
-                top_k=top_k,
+                top_k=self.top_k,
                 indices_type=indices_type,
             )
 
         # DeepSeekv2 uses grouped_top_k
-        elif use_grouped_topk:
-            assert topk_group is not None
-            assert num_expert_group is not None
+        elif self.use_grouped_topk:
+            assert self.topk_group is not None
+            assert self.num_expert_group is not None
             if rocm_aiter_ops.is_fused_moe_enabled():
                 if not rocm_aiter_ops.is_fusion_moe_shared_experts_enabled():
-                    assert num_fused_shared_experts == 0
+                    assert self.num_fused_shared_experts == 0
                 grouped_topk_impl = partial(
                     rocm_aiter_grouped_topk,
-                    num_fused_shared_experts=num_fused_shared_experts,
+                    num_fused_shared_experts=self.num_fused_shared_experts,
                 )
             else:
                 grouped_topk_impl = grouped_topk
@@ -1580,50 +1582,46 @@
             topk_weights, topk_ids = grouped_topk_impl(
                 hidden_states=hidden_states,
                 gating_output=router_logits,
-                topk=top_k,
-                renormalize=renormalize,
-                num_expert_group=num_expert_group,
-                topk_group=topk_group,
-                scoring_func=scoring_func,
-                routed_scaling_factor=routed_scaling_factor,
-                e_score_correction_bias=e_score_correction_bias,
+                topk=self.top_k,
+                renormalize=self.renormalize,
+                num_expert_group=self.num_expert_group,
+                topk_group=self.topk_group,
+                scoring_func=self.scoring_func,
+                routed_scaling_factor=self.routed_scaling_factor,
+                e_score_correction_bias=self.e_score_correction_bias,
             )
-        elif e_score_correction_bias is not None:
+        elif self.e_score_correction_bias is not None:
             topk_weights, topk_ids = fused_topk_bias(
                 hidden_states=hidden_states,
                 gating_output=router_logits,
-                e_score_correction_bias=e_score_correction_bias.data,
-                topk=top_k,
-                renormalize=renormalize,
+                e_score_correction_bias=self.e_score_correction_bias.data,
+                topk=self.top_k,
+                renormalize=self.renormalize,
             )
-            if routed_scaling_factor != 1.0:
-                topk_weights *= routed_scaling_factor
-        elif custom_routing_function is None:
+            if self.routed_scaling_factor != 1.0:
+                topk_weights *= self.routed_scaling_factor
+        elif self.custom_routing_function is None:
             topk_weights, topk_ids, token_expert_indices = fused_topk(
                 hidden_states=hidden_states,
                 gating_output=router_logits,
-                topk=top_k,
-                renormalize=renormalize,
+                topk=self.top_k,
+                renormalize=self.renormalize,
                 indices_type=indices_type,
             )
         else:
-            topk_weights, topk_ids = custom_routing_function(
+            topk_weights, topk_ids = self.custom_routing_function(
                 hidden_states=hidden_states,
                 gating_output=router_logits,
-                topk=top_k,
-                renormalize=renormalize,
+                topk=self.top_k,
+                renormalize=self.renormalize,
             )
 
-        if enable_eplb:
-            assert expert_load_view is not None
-            assert logical_to_physical_map is not None
-            assert 
logical_replica_count is not None - + if self.enable_eplb: topk_ids = eplb_map_to_physical_and_record( topk_ids=topk_ids, - expert_load_view=expert_load_view, - logical_to_physical_map=logical_to_physical_map, - logical_replica_count=logical_replica_count, + expert_load_view=self.expert_load_view, + logical_to_physical_map=self.logical_to_physical_map, + logical_replica_count=self.logical_replica_count, ) if (indices_type is not None) and topk_ids.dtype != indices_type: @@ -1633,16 +1631,16 @@ class FusedMoE(CustomOp): # Compute zero expert result if needed if ( - zero_expert_num is not None - and zero_expert_num > 0 - and zero_expert_type is not None - and global_num_experts is not None + self.zero_expert_num is not None + and self.zero_expert_num > 0 + and self.zero_expert_type is not None + and self.global_num_experts is not None ): zero_expert_result = zero_experts_compute_triton( expert_indices=topk_ids, expert_scales=topk_weights, - num_experts=global_num_experts, - zero_expert_type=zero_expert_type, + num_experts=self.global_num_experts, + zero_expert_type=self.zero_expert_type, hidden_states=hidden_states, ) else: diff --git a/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py b/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py index 63b0e6f573d65..48e5a8907f926 100644 --- a/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +++ b/vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py @@ -331,7 +331,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): def forward_cuda( self, - layer: torch.nn.Module, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 x: torch.Tensor, use_grouped_topk: bool, top_k: int, @@ -352,31 +352,9 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - zero_expert_num = getattr(layer, "zero_expert_num", 0) - zero_expert_type = getattr(layer, "zero_expert_type", None) - topk_weights, topk_ids, zero_expert_result = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, - enable_eplb=enable_eplb, - expert_map=expert_map, - expert_load_view=expert_load_view, - logical_to_physical_map=logical_to_physical_map, - logical_replica_count=logical_replica_count, - global_num_experts=global_num_experts, - zero_expert_num=zero_expert_num, - zero_expert_type=zero_expert_type, - num_fused_shared_experts=layer.num_fused_shared_experts, ) if self.rocm_aiter_moe_enabled: @@ -415,7 +393,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): expert_map=expert_map, ) - if zero_expert_num != 0 and zero_expert_type is not None: + if layer.zero_expert_num != 0 and layer.zero_expert_type is not None: assert not isinstance(result, tuple), ( "Shared + zero experts are mutually exclusive not yet supported" ) @@ -425,7 +403,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): def forward_cpu( self, - layer: torch.nn.Module, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 x: torch.Tensor, use_grouped_topk: bool, top_k: 
int, @@ -474,7 +452,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): def forward_xpu( self, - layer: torch.nn.Module, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 x: torch.Tensor, use_grouped_topk: bool, top_k: int, @@ -515,7 +493,7 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): def forward_tpu( self, - layer: torch.nn.Module, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 x: torch.Tensor, use_grouped_topk: bool, top_k: int, diff --git a/vllm/model_executor/layers/quantization/awq_marlin.py b/vllm/model_executor/layers/quantization/awq_marlin.py index 3f6ea68072b40..66945e2d2a7c8 100644 --- a/vllm/model_executor/layers/quantization/awq_marlin.py +++ b/vllm/model_executor/layers/quantization/awq_marlin.py @@ -597,7 +597,7 @@ class AWQMoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -618,24 +618,11 @@ class AWQMoEMethod(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError("EPLB not supported for `AWQMoEMethod` yet.") - assert activation == "silu", "Only SiLU activation is supported." - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_marlin_moe( diff --git a/vllm/model_executor/layers/quantization/bitsandbytes.py b/vllm/model_executor/layers/quantization/bitsandbytes.py index e5a741e639ad9..1e57fa218b797 100644 --- a/vllm/model_executor/layers/quantization/bitsandbytes.py +++ b/vllm/model_executor/layers/quantization/bitsandbytes.py @@ -495,7 +495,7 @@ class BitsAndBytesMoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -518,25 +518,11 @@ class BitsAndBytesMoEMethod(FusedMoEMethodBase): ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: from vllm.model_executor.layers.fused_moe import fused_experts - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `BitsAndBytesMoEMethod` yet." - ) - - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) + # TODO(bnell): Do these need to be called on the hot path? 
if self.quant_config.load_in_8bit: w13, w2 = self._apply_8bit_dequant(layer) else: diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py index ad547dd409822..149e4419c64a4 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py @@ -511,7 +511,7 @@ class CompressedTensorsW4A4MoeMethod(CompressedTensorsMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -532,16 +532,17 @@ class CompressedTensorsW4A4MoeMethod(CompressedTensorsMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `CompressedTensorsW4A4MoeMethod` yet." - ) assert activation == "silu", "Only SiLU activation is supported." if ( self.allow_flashinfer and self.flashinfer_moe_backend == FlashinferMoeBackend.TENSORRT_LLM ): + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `CompressedTensorsW4A4MoeMethod` yet." + ) + return flashinfer_trtllm_fp4_moe( layer=layer, x=x, @@ -554,19 +555,9 @@ class CompressedTensorsW4A4MoeMethod(CompressedTensorsMoEMethod): e_score_correction_bias=e_score_correction_bias, ) - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) if self.use_marlin: @@ -1109,7 +1100,7 @@ class CompressedTensorsW8A8Fp8MoEMethod(CompressedTensorsMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -1130,31 +1121,9 @@ class CompressedTensorsW8A8Fp8MoEMethod(CompressedTensorsMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - assert expert_load_view is not None - assert logical_to_physical_map is not None - assert logical_replica_count is not None - assert isinstance(layer, FusedMoE) - - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, - num_fused_shared_experts=layer.num_fused_shared_experts, - enable_eplb=enable_eplb, - expert_map=expert_map, - expert_load_view=expert_load_view, - logical_to_physical_map=logical_to_physical_map, - logical_replica_count=logical_replica_count, ) per_act_token = self.input_quant.strategy == QuantizationStrategy.TOKEN @@ 
-1377,7 +1346,7 @@ class CompressedTensorsW8A8Int8MoEMethod(CompressedTensorsMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -1398,26 +1367,11 @@ class CompressedTensorsW8A8Int8MoEMethod(CompressedTensorsMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `CompressedTensorsW8A8Int8MoEMethod` yet." - ) - from vllm.model_executor.layers.fused_moe import fused_experts - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_experts( @@ -1738,7 +1692,7 @@ class CompressedTensorsWNA16MarlinMoEMethod(CompressedTensorsMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -1759,26 +1713,11 @@ class CompressedTensorsWNA16MarlinMoEMethod(CompressedTensorsMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `CompressedTensorsWNA16MarlinMoEMethod` yet." - ) - assert activation == "silu", f"{activation} not supported for Marlin MoE." - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_marlin_moe( @@ -2001,7 +1940,7 @@ class CompressedTensorsWNA16MoEMethod(CompressedTensorsMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -2022,43 +1961,11 @@ class CompressedTensorsWNA16MoEMethod(CompressedTensorsMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - if expert_load_view is None: - raise ValueError("enable_eplb=True requiere expert_load_view != None") - if logical_to_physical_map is None: - raise ValueError( - "enable_eplb=True requiere logical_to_physical_map != None" - ) - if logical_replica_count is None: - raise ValueError( - "enable_eplb=True requiere logical_replica_count != None" - ) - if not isinstance(layer, FusedMoE): - raise TypeError( - "EPLB is only supported when `layer` is a instance of FusedMoE." 
- ) - from vllm.model_executor.layers.fused_moe import fused_experts - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, - num_fused_shared_experts=getattr(layer, "num_fused_shared_experts", 0), - enable_eplb=enable_eplb, - expert_map=expert_map, - expert_load_view=expert_load_view, - logical_to_physical_map=logical_to_physical_map, - logical_replica_count=logical_replica_count, ) return fused_experts( diff --git a/vllm/model_executor/layers/quantization/experts_int8.py b/vllm/model_executor/layers/quantization/experts_int8.py index 5241f9a2301be..7ebe40ec84687 100644 --- a/vllm/model_executor/layers/quantization/experts_int8.py +++ b/vllm/model_executor/layers/quantization/experts_int8.py @@ -137,7 +137,7 @@ class ExpertsInt8MoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -158,26 +158,11 @@ class ExpertsInt8MoEMethod(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `ExpertsInt8MoEMethod` yet." - ) - from vllm.model_executor.layers.fused_moe import fused_experts - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_experts( diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index 91bd45bf879cb..9e2718057038d 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -1140,7 +1140,7 @@ class Fp8MoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -1216,31 +1216,9 @@ class Fp8MoEMethod(FusedMoEMethodBase): apply_router_weight_on_input=apply_router_weight_on_input, ) - zero_expert_num = getattr(layer, "zero_expert_num", 0) - zero_expert_type = getattr(layer, "zero_expert_type", None) - - select_result = FusedMoE.select_experts( + select_result = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, - enable_eplb=enable_eplb, - expert_map=expert_map, - expert_load_view=expert_load_view, - 
logical_to_physical_map=logical_to_physical_map, - logical_replica_count=logical_replica_count, - global_num_experts=global_num_experts, - zero_expert_num=zero_expert_num, - zero_expert_type=zero_expert_type, - num_fused_shared_experts=layer.num_fused_shared_experts, ) topk_weights, topk_ids, zero_expert_result = select_result @@ -1322,7 +1300,8 @@ class Fp8MoEMethod(FusedMoEMethodBase): self.allow_cutlass_block_scaled_grouped_gemm ), ) - if zero_expert_num != 0 and zero_expert_type is not None: + + if layer.zero_expert_num != 0 and layer.zero_expert_type is not None: assert not isinstance(result, tuple), ( "Shared + zero experts are mutually exclusive not yet supported" ) diff --git a/vllm/model_executor/layers/quantization/gguf.py b/vllm/model_executor/layers/quantization/gguf.py index 42d7a67371ae8..bcdfafb50fc5a 100644 --- a/vllm/model_executor/layers/quantization/gguf.py +++ b/vllm/model_executor/layers/quantization/gguf.py @@ -621,7 +621,7 @@ class GGUFMoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -642,9 +642,6 @@ class GGUFMoEMethod(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError("EPLB not supported for `GGUFMoEMethod` yet.") - assert activation == "silu", "Only SiLU activation is supported." if apply_router_weight_on_input: raise NotImplementedError( @@ -652,19 +649,9 @@ class GGUFMoEMethod(FusedMoEMethodBase): "fused GGUF MoE method." ) - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_moe_gguf( x, diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py index 68a122fd46c6b..77b15db373a3a 100644 --- a/vllm/model_executor/layers/quantization/gptq_marlin.py +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -722,7 +722,7 @@ class GPTQMarlinMoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -743,26 +743,11 @@ class GPTQMarlinMoEMethod(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `GPTQMarlinMoEMethod` yet." - ) - assert activation == "silu", "Only SiLU activation is supported." 
- topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_marlin_moe( diff --git a/vllm/model_executor/layers/quantization/modelopt.py b/vllm/model_executor/layers/quantization/modelopt.py index 01a23168bdde3..8165673135910 100644 --- a/vllm/model_executor/layers/quantization/modelopt.py +++ b/vllm/model_executor/layers/quantization/modelopt.py @@ -696,7 +696,7 @@ class ModelOptFp8MoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -717,12 +717,11 @@ class ModelOptFp8MoEMethod(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `ModelOptFp8MoEMethod` yet." - ) - if self.flashinfer_moe_backend == FlashinferMoeBackend.TENSORRT_LLM: + if layer.enable_eplb: + raise NotImplementedError( + "EPLB not supported for `ModelOptFp8MoEMethod` yet." + ) assert activation == "silu", ( f"Expected 'silu' activation but got {activation}" ) @@ -740,19 +739,9 @@ class ModelOptFp8MoEMethod(FusedMoEMethodBase): ) # Expert selection - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) if self.flashinfer_moe_backend == FlashinferMoeBackend.CUTLASS: @@ -1459,7 +1448,7 @@ class ModelOptNvFp4FusedMoE(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -1480,16 +1469,16 @@ class ModelOptNvFp4FusedMoE(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `ModelOptNvFp4FusedMoE` yet." - ) assert activation == "silu", "Only SiLU activation is supported." if ( self.allow_flashinfer and self.flashinfer_moe_backend == FlashinferMoeBackend.TENSORRT_LLM ): + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `ModelOptNvFp4FusedMoE` yet." 
+ ) return flashinfer_trtllm_fp4_moe( layer=layer, x=x, @@ -1502,19 +1491,9 @@ class ModelOptNvFp4FusedMoE(FusedMoEMethodBase): e_score_correction_bias=e_score_correction_bias, ) - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) if self.use_marlin: diff --git a/vllm/model_executor/layers/quantization/moe_wna16.py b/vllm/model_executor/layers/quantization/moe_wna16.py index 2090c86f78dc8..cf348290a2716 100644 --- a/vllm/model_executor/layers/quantization/moe_wna16.py +++ b/vllm/model_executor/layers/quantization/moe_wna16.py @@ -359,7 +359,7 @@ class MoeWNA16Method(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -380,25 +380,12 @@ class MoeWNA16Method(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError("EPLB not supported for `MoeWNA16Method` yet.") - from vllm.model_executor.layers.fused_moe import fused_experts assert activation == "silu", "Only SiLU activation is supported." - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_experts( diff --git a/vllm/model_executor/layers/quantization/mxfp4.py b/vllm/model_executor/layers/quantization/mxfp4.py index 66ae2e94c60a5..255b5aad17853 100644 --- a/vllm/model_executor/layers/quantization/mxfp4.py +++ b/vllm/model_executor/layers/quantization/mxfp4.py @@ -862,7 +862,7 @@ class Mxfp4MoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -887,18 +887,9 @@ class Mxfp4MoEMethod(FusedMoEMethodBase): raise NotImplementedError("EPLB is not supported for mxfp4") if self.mxfp4_backend == Mxfp4Backend.MARLIN: - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, ) return fused_marlin_moe( @@ -989,17 +980,9 @@ class Mxfp4MoEMethod(FusedMoEMethodBase): ): from vllm.utils.flashinfer import flashinfer_cutlass_fused_moe - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, 
router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - e_score_correction_bias=e_score_correction_bias, ) # Backend-specific preparation diff --git a/vllm/model_executor/layers/quantization/quark/quark_moe.py b/vllm/model_executor/layers/quantization/quark/quark_moe.py index 30772c3665b06..8be0299eaa66f 100644 --- a/vllm/model_executor/layers/quantization/quark/quark_moe.py +++ b/vllm/model_executor/layers/quantization/quark/quark_moe.py @@ -334,7 +334,7 @@ class QuarkW8A8Fp8MoEMethod(QuarkMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -355,24 +355,9 @@ class QuarkW8A8Fp8MoEMethod(QuarkMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `QuarkW8A8Fp8MoEMethod` yet." - ) - - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) if self.rocm_aiter_moe_enabled: @@ -609,7 +594,7 @@ class QuarkOCP_MX_MoEMethod(QuarkMoEMethod): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -630,24 +615,9 @@ class QuarkOCP_MX_MoEMethod(QuarkMoEMethod): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError( - "EPLB not supported for `QuarkOCP_MX_MoEMethod` yet." 
- ) - - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) if not self.emulate: diff --git a/vllm/model_executor/layers/quantization/rtn.py b/vllm/model_executor/layers/quantization/rtn.py index 52656263a601b..7b51b828009fc 100644 --- a/vllm/model_executor/layers/quantization/rtn.py +++ b/vllm/model_executor/layers/quantization/rtn.py @@ -356,7 +356,7 @@ class RTNMoEMethod(FusedMoEMethodBase): def apply( self, - layer: torch.nn.Module, + layer: FusedMoE, x: torch.Tensor, router_logits: torch.Tensor, top_k: int, @@ -377,22 +377,9 @@ class RTNMoEMethod(FusedMoEMethodBase): logical_to_physical_map: torch.Tensor | None = None, logical_replica_count: torch.Tensor | None = None, ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: - if enable_eplb: - raise NotImplementedError("EPLB not supported for `RTNMoEMethod` yet.") - - topk_weights, topk_ids, _ = FusedMoE.select_experts( + topk_weights, topk_ids, _ = layer.select_experts( hidden_states=x, router_logits=router_logits, - use_grouped_topk=use_grouped_topk, - top_k=top_k, - renormalize=renormalize, - topk_group=topk_group, - num_expert_group=num_expert_group, - custom_routing_function=custom_routing_function, - scoring_func=scoring_func, - routed_scaling_factor=routed_scaling_factor, - e_score_correction_bias=e_score_correction_bias, - indices_type=self.topk_indices_dtype, ) return fused_marlin_moe( From 839c6b7b72bcc7197443019aae32be409f1c0363 Mon Sep 17 00:00:00 2001 From: Chenheli Hua Date: Mon, 24 Nov 2025 11:24:37 -0800 Subject: [PATCH 05/43] [Multimodal][Qwen3 Omni] Make Qwen3 Omni work with audio-in-video inputs in V1 engine. (#27721) Signed-off-by: Chenheli Hua Signed-off-by: Roger Wang Co-authored-by: Roger Wang --- .../qwen3_omni/only_thinker.py | 170 ++++++++++++++ tests/model_executor/test_qwen3_omni.py | 221 ++++++++++++++++++ .../models/qwen2_5_omni_thinker.py | 25 -- .../models/qwen3_omni_moe_thinker.py | 110 ++++++--- 4 files changed, 467 insertions(+), 59 deletions(-) create mode 100644 examples/offline_inference/qwen3_omni/only_thinker.py create mode 100644 tests/model_executor/test_qwen3_omni.py diff --git a/examples/offline_inference/qwen3_omni/only_thinker.py b/examples/offline_inference/qwen3_omni/only_thinker.py new file mode 100644 index 0000000000000..88a61ed694c2e --- /dev/null +++ b/examples/offline_inference/qwen3_omni/only_thinker.py @@ -0,0 +1,170 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +This example shows how to use vLLM for running offline inference +with the correct prompt format on Qwen3-Omni (thinker only).
+""" + +from typing import NamedTuple + +from vllm import LLM, SamplingParams +from vllm.assets.audio import AudioAsset +from vllm.assets.image import ImageAsset +from vllm.assets.video import VideoAsset +from vllm.multimodal.image import convert_image_mode +from vllm.utils.argparse_utils import FlexibleArgumentParser + + +class QueryResult(NamedTuple): + inputs: dict + limit_mm_per_prompt: dict[str, int] + + +# NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on +# lower-end GPUs. +# Unless specified, these settings have been tested to work on a single L4. + +default_system = ( + "You are Qwen, a virtual human developed by the Qwen Team, Alibaba " + "Group, capable of perceiving auditory and visual inputs, as well as " + "generating text and speech." +) + + +def get_mixed_modalities_query() -> QueryResult: + question = ( + "What is recited in the audio? " + "What is the content of this image? Why is this video funny?" + ) + prompt = ( + f"<|im_start|>system\n{default_system}<|im_end|>\n" + "<|im_start|>user\n<|audio_start|><|audio_pad|><|audio_end|>" + "<|vision_start|><|image_pad|><|vision_end|>" + "<|vision_start|><|video_pad|><|vision_end|>" + f"{question}<|im_end|>\n" + f"<|im_start|>assistant\n" + ) + return QueryResult( + inputs={ + "prompt": prompt, + "multi_modal_data": { + "audio": AudioAsset("mary_had_lamb").audio_and_sample_rate, + "image": convert_image_mode( + ImageAsset("cherry_blossom").pil_image, "RGB" + ), + "video": VideoAsset(name="baby_reading", num_frames=16).np_ndarrays, + }, + }, + limit_mm_per_prompt={"audio": 1, "image": 1, "video": 1}, + ) + + +def get_use_audio_in_video_query() -> QueryResult: + question = ( + "Describe the content of the video in details, then convert what the " + "baby say into text." + ) + prompt = ( + f"<|im_start|>system\n{default_system}<|im_end|>\n" + "<|im_start|>user\n<|vision_start|><|video_pad|><|vision_end|>" + f"{question}<|im_end|>\n" + f"<|im_start|>assistant\n" + ) + asset = VideoAsset(name="baby_reading", num_frames=16) + audio = asset.get_audio(sampling_rate=16000) + return QueryResult( + inputs={ + "prompt": prompt, + "multi_modal_data": { + "video": asset.np_ndarrays, + "audio": audio, + }, + "mm_processor_kwargs": { + "use_audio_in_video": True, + }, + }, + limit_mm_per_prompt={"audio": 1, "video": 1}, + ) + + +def get_multi_audios_query() -> QueryResult: + question = "Are these two audio clips the same?" + prompt = ( + f"<|im_start|>system\n{default_system}<|im_end|>\n" + "<|im_start|>user\n<|audio_start|><|audio_pad|><|audio_end|>" + "<|audio_start|><|audio_pad|><|audio_end|>" + f"{question}<|im_end|>\n" + f"<|im_start|>assistant\n" + ) + return QueryResult( + inputs={ + "prompt": prompt, + "multi_modal_data": { + "audio": [ + AudioAsset("winning_call").audio_and_sample_rate, + AudioAsset("mary_had_lamb").audio_and_sample_rate, + ], + }, + }, + limit_mm_per_prompt={ + "audio": 2, + }, + ) + + +query_map = { + "mixed_modalities": get_mixed_modalities_query, + "use_audio_in_video": get_use_audio_in_video_query, + "multi_audios": get_multi_audios_query, +} + + +def main(args): + model_name = "Qwen/Qwen3-Omni-30B-A3B-Instruct" + query_result = query_map[args.query_type]() + + llm = LLM( + model=model_name, + max_model_len=12800, + max_num_seqs=5, + limit_mm_per_prompt=query_result.limit_mm_per_prompt, + seed=args.seed, + ) + + # We set temperature to 0.2 so that outputs can be different + # even when all prompts are identical when running batch inference. 
+ sampling_params = SamplingParams(temperature=0.2, max_tokens=256) + + outputs = llm.generate(query_result.inputs, sampling_params=sampling_params) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + + +def parse_args(): + parser = FlexibleArgumentParser( + description="Demo on using vLLM for offline inference with " + "audio language models" + ) + parser.add_argument( + "--query-type", + "-q", + type=str, + default="mixed_modalities", + choices=query_map.keys(), + help="Query type.", + ) + parser.add_argument( + "--seed", + type=int, + default=None, + help="Set the seed when initializing `vllm.LLM`.", + ) + + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/tests/model_executor/test_qwen3_omni.py b/tests/model_executor/test_qwen3_omni.py new file mode 100644 index 0000000000000..c92c61dcd3bc2 --- /dev/null +++ b/tests/model_executor/test_qwen3_omni.py @@ -0,0 +1,221 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +from unittest.mock import Mock + +import pytest +from transformers import PretrainedConfig + +from vllm.multimodal.processing import InputProcessingContext + + +# Helper function to print input IDs with coalesced audio/video tokens. +def print_input_ids(input_ids): + """ + Print input IDs, compressing consecutive special tokens. + - 151675: <|audio_pad|> + - 151656: <|video_pad|> + """ + if not input_ids: + print("[]") + return + + result = [] + i = 0 + + while i < len(input_ids): + current_id = input_ids[i] + + # Check if it's a special token that should be compressed + if current_id in [151675, 151656]: + # Count consecutive occurrences + count = 1 + while i + count < len(input_ids) and input_ids[i + count] == current_id: + count += 1 + + # Add compressed representation + token_name = "<|audio_pad|>" if current_id == 151675 else "<|video_pad|>" + result.append(f"{token_name} * {count}") + i += count + else: + # Regular token, just add it + result.append(str(current_id)) + i += 1 + + print(", ".join(result)) + + +@pytest.fixture +def mock_qwen3_omni_config(): + """Create a mock Qwen3OmniMoeThinker config.""" + config = Mock(spec=PretrainedConfig) + # Token IDs from https://huggingface.co/Qwen/Qwen3-Omni-30B-A3B-Instruct/blob/main/tokenizer_config.json + config.audio_token_id = 151675 # <|audio_pad|> + config.video_token_id = 151656 # <|video_pad|> + config.image_token_id = 151655 # <|image_pad|> + config.audio_start_token_id = 151669 # <|audio_start|> + config.audio_end_token_id = 151670 # <|audio_end|> + config.vision_start_token_id = 151652 # <|vision_start|> + config.position_id_per_seconds = 12.5 + + # Vision config + vision_config = Mock() + vision_config.spatial_merge_size = 2 + config.vision_config = vision_config + + return config + + +@pytest.fixture +def mock_processor(): + """Create a mock HF processor.""" + from transformers.models.whisper import WhisperFeatureExtractor + + processor = Mock() + processor.audio_token = "<|audio_pad|>" + processor.image_token = "<|image_pad|>" + processor.video_token = "<|video_pad|>" + + # Create a real WhisperFeatureExtractor instance for the feature_extractor attribute + feature_extractor = WhisperFeatureExtractor() + processor.feature_extractor = feature_extractor + + return processor + + +@pytest.fixture +def mock_tokenizer(): + """Create a mock tokenizer.""" + tokenizer = Mock() + # Token IDs from 
https://huggingface.co/Qwen/Qwen3-Omni-30B-A3B-Instruct/blob/main/tokenizer_config.json + tokenizer.get_vocab = Mock( + return_value={ + "<|audio_pad|>": 151675, + "<|video_pad|>": 151656, + "<|image_pad|>": 151655, + "<|audio_start|>": 151669, + "<|audio_end|>": 151670, + "<|vision_start|>": 151652, + "<|vision_end|>": 151653, + } + ) + tokenizer.encode = Mock( + side_effect=lambda x: { + "<|vision_start|>": [151652], + "<|vision_end|>": [151653], + "<|audio_start|>": [151669], + "<|audio_end|>": [151670], + "<|audio_pad|>": [151675], + "<|image_pad|>": [151655], + "<|video_pad|>": [151656], + }.get(x, [0]) + ) + tokenizer.vision_bos_token = "<|vision_start|>" + tokenizer.vision_eos_token = "<|vision_end|>" + tokenizer.audio_bos_token = "<|audio_start|>" + tokenizer.audio_eos_token = "<|audio_end|>" + return tokenizer + + +@pytest.fixture +def mock_image_processor(): + """Create a mock image processor.""" + image_processor = Mock() + image_processor.merge_size = 2 + return image_processor + + +def test_qwen3_omni_get_updates_use_audio_in_video( + mock_qwen3_omni_config, + mock_processor, + mock_tokenizer, + mock_image_processor, +): + """Test the get_updates_use_audio_in_video method directly.""" + + from vllm.model_executor.models.qwen3_omni_moe_thinker import ( + Qwen3OmniMoeThinkerMultiModalProcessor, + Qwen3OmniMoeThinkerProcessingInfo, + ) + + # Create a mock context + mock_ctx = Mock(spec=InputProcessingContext) + + # Create processing info + info = Qwen3OmniMoeThinkerProcessingInfo(mock_ctx) + info.get_hf_config = Mock(return_value=mock_qwen3_omni_config) + info.get_hf_processor = Mock(return_value=mock_processor) + info.get_tokenizer = Mock(return_value=mock_tokenizer) + info.get_image_processor = Mock(return_value=mock_image_processor) + + # Create a mock dummy_inputs builder + mock_dummy_inputs = Mock() + + # Create the processor + processor = Qwen3OmniMoeThinkerMultiModalProcessor(info, mock_dummy_inputs) + + # Test parameters from reference video + # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-Omni/demo/draw.mp4 + audio_len = 85 + video_grid_thw = [6, 36, 64] + video_second_per_grid_t = 2.0 + + # Call the method + updates = processor.get_updates_use_audio_in_video( + thinker_config=mock_qwen3_omni_config, + audio_len=audio_len, + video_grid_thw=video_grid_thw, + video_second_per_grid_t=video_second_per_grid_t, + ) + + # Updated input ids should align with HF implementation. 
+ # 151669, + # <|video_pad|> * 576, <|audio_pad|> * 25, + # <|video_pad|> * 576, <|audio_pad|> * 25, + # <|video_pad|> * 576, <|audio_pad|> * 25, + # <|video_pad|> * 576, <|audio_pad|> * 10, + # <|video_pad|> * 1152, + # 151670 + print_input_ids(updates) + + # Verify structure + assert isinstance(updates, list) + assert len(updates) > 0 + + # Verify start and end tokens + audio_start_token_id = mock_qwen3_omni_config.audio_start_token_id + audio_end_token_id = mock_qwen3_omni_config.audio_end_token_id + + assert updates[0] == audio_start_token_id + assert updates[-1] == audio_end_token_id + + # Verify both audio and video tokens are present + audio_token_id = mock_qwen3_omni_config.audio_token_id + video_token_id = mock_qwen3_omni_config.video_token_id + + audio_count = updates.count(audio_token_id) + video_count = updates.count(video_token_id) + + assert audio_count == audio_len, ( + f"Expected {audio_len} audio tokens, got {audio_count}" + ) + + # Calculate expected video token count + spatial_merge_size = mock_qwen3_omni_config.vision_config.spatial_merge_size + height = video_grid_thw[1] // spatial_merge_size + width = video_grid_thw[2] // spatial_merge_size + expected_video_count = video_grid_thw[0] * height * width + + assert video_count == expected_video_count, ( + f"Expected {expected_video_count} video tokens, got {video_count}" + ) + + # Total tokens should be: 1 (start) + audio_len + video_count + 1 (end) + expected_total = 1 + audio_len + expected_video_count + 1 + assert len(updates) == expected_total, ( + f"Expected {expected_total} total tokens, got {len(updates)}" + ) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py index 262ea771d9cdf..7506ee8656fda 100644 --- a/vllm/model_executor/models/qwen2_5_omni_thinker.py +++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py @@ -23,7 +23,6 @@ """Inference-only Qwen2.5-Omni model (thinker part).""" from collections.abc import Callable, Iterable, Mapping, Sequence -from copy import copy from functools import partial from typing import Annotated, Any, Literal @@ -387,15 +386,6 @@ class Qwen2_5OmniThinkerMultiModalProcessor( self._validate_mm_kwargs(mm_kwargs, mm_item_counts) self._validate_mm_updates(mm_prompt_updates, mm_item_counts) - use_audio_in_video = False - if "video" in mm_kwargs: - video_items = [item for item in mm_kwargs["video"] if item is not None] - # only check video items (if there are any) - if video_items: - use_audio_in_video = all( - item["use_audio_in_video"].data for item in video_items - ) - if is_update_applied: mm_placeholders = self._find_mm_placeholders( prompt_ids, @@ -404,7 +394,6 @@ class Qwen2_5OmniThinkerMultiModalProcessor( self._validate_mm_placeholders( mm_placeholders, mm_item_counts, - use_audio_in_video=use_audio_in_video, ) else: prompt_ids, mm_placeholders = self._apply_prompt_updates( @@ -414,7 +403,6 @@ class Qwen2_5OmniThinkerMultiModalProcessor( self._validate_mm_placeholders( mm_placeholders, mm_item_counts, - use_audio_in_video=use_audio_in_video, ) return prompt_ids, mm_placeholders @@ -640,19 +628,6 @@ class Qwen2_5OmniThinkerMultiModalProcessor( return mm_processed_data - def _validate_mm_placeholders( - self, - mm_placeholders: Mapping[str, list[PlaceholderFeaturesInfo]], - mm_item_counts: Mapping[str, int], - use_audio_in_video: bool = False, - ) -> None: - if use_audio_in_video: - mm_item_counts = copy(mm_item_counts) - if "video" in mm_item_counts: 
- assert "audio" in mm_item_counts - mm_item_counts["audio"] -= mm_item_counts["video"] - super()._validate_mm_placeholders(mm_placeholders, mm_item_counts) - class Qwen2_5OmniConditionalGenerationMixin: def _parse_and_validate_audio_input( diff --git a/vllm/model_executor/models/qwen3_omni_moe_thinker.py b/vllm/model_executor/models/qwen3_omni_moe_thinker.py index 61f218f16d79c..f5f88f66eff91 100755 --- a/vllm/model_executor/models/qwen3_omni_moe_thinker.py +++ b/vllm/model_executor/models/qwen3_omni_moe_thinker.py @@ -68,11 +68,11 @@ from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalFeatureSpec, MultiModalKwargsItems from vllm.multimodal.parse import AudioProcessorItems, MultiModalDataItems from vllm.multimodal.processing import ( - BaseMultiModalProcessor, MultiModalPromptUpdates, PlaceholderFeaturesInfo, PromptReplacement, PromptUpdate, + PromptUpdateDetails, ) from vllm.sequence import IntermediateTensors @@ -87,7 +87,6 @@ from .qwen2_5_omni_thinker import ( Qwen2_5OmniConditionalGenerationMixin, Qwen2_5OmniThinkerDummyInputsBuilder, Qwen2_5OmniThinkerMultiModalProcessor, - Qwen2_5OmniThinkerProcessingInfo, ) from .qwen2_5_vl import ( Qwen2_5_VisionAttention, @@ -807,24 +806,8 @@ class Qwen3OmniMoeThinkerMultiModalProcessor( else: use_audio_in_video = False - if use_audio_in_video and "video" in mm_item_counts: - assert "audio" in mm_item_counts - mm_item_counts["audio"] -= mm_item_counts["video"] - - # Special case with `use_audio_in_video=True` - if use_audio_in_video: - if is_update_applied: - prompt_ids = self._get_raw_input_ids(prompt_ids, use_audio_in_video) - ( - prompt_ids, - mm_placeholders, - ) = self._apply_prompt_updates( - prompt_ids, - mm_prompt_updates, - ) - self._validate_mm_placeholders(mm_placeholders, mm_item_counts) # normal case with `use_audio_in_video=False` - elif is_update_applied: + if is_update_applied: mm_placeholders = self._find_mm_placeholders( prompt_ids, mm_prompt_updates, @@ -834,10 +817,24 @@ class Qwen3OmniMoeThinkerMultiModalProcessor( mm_item_counts, ) else: - prompt_ids, mm_placeholders = self._apply_prompt_updates( - prompt_ids, - mm_prompt_updates, - ) + if use_audio_in_video and "audio" in mm_prompt_updates: + filtered_updates = { + k: v for k, v in mm_prompt_updates.items() if k != "audio" + } + prompt_ids, mm_placeholders = self._apply_prompt_updates( + prompt_ids, + filtered_updates, + ) + # Derive audio placeholders from video placeholders + mm_placeholders = self._derive_audio_from_video_placeholders( + mm_placeholders, mm_prompt_updates + ) + else: + prompt_ids, mm_placeholders = self._apply_prompt_updates( + prompt_ids, + mm_prompt_updates, + ) + self._validate_mm_placeholders( mm_placeholders, mm_item_counts, @@ -962,7 +959,9 @@ class Qwen3OmniMoeThinkerMultiModalProcessor( def get_replacement_qwen2_use_audio_in_video(item_idx: int): nonlocal audio_in_video_item_idx - audio_num_features = audio_output_lengths[audio_item_idx + item_idx] + audio_num_features = audio_output_lengths[ + audio_in_video_item_idx + item_idx + ] video_grid_thw = out_mm_data["video_grid_thw"][item_idx] audio_in_video_item_idx += 1 @@ -971,14 +970,17 @@ class Qwen3OmniMoeThinkerMultiModalProcessor( if second_per_grid_ts: video_second_per_grid_t = second_per_grid_ts[item_idx] else: - video_second_per_grid_t = 1.0 + video_second_per_grid_t = 2.0 - return self.get_updates_use_audio_in_video( + placeholder = self.get_updates_use_audio_in_video( thinker_config=thinker_config, audio_len=audio_num_features, 
video_grid_thw=video_grid_thw, video_second_per_grid_t=video_second_per_grid_t, ) + return PromptUpdateDetails.select_token_id( + placeholder, embed_token_id=video_token_id + ) video_replacement_fn = ( get_replacement_qwen2_use_audio_in_video @@ -1004,14 +1006,50 @@ class Qwen3OmniMoeThinkerMultiModalProcessor( ), ] - def _validate_mm_placeholders( + def _derive_audio_from_video_placeholders( self, - mm_placeholders: Mapping[str, list[PlaceholderFeaturesInfo]], - mm_item_counts: Mapping[str, int], - ) -> None: - BaseMultiModalProcessor[ - Qwen2_5OmniThinkerProcessingInfo - ]._validate_mm_placeholders(self, mm_placeholders, mm_item_counts) + placeholders: Mapping[str, list[PlaceholderFeaturesInfo]], + mm_prompt_updates: MultiModalPromptUpdates, + ) -> Mapping[str, list[PlaceholderFeaturesInfo]]: + """ + Helper to derive audio placeholders from video placeholders when + use_audio_in_video=True. + """ + if "video" not in placeholders: + return placeholders + + # Validate audio and video counts match + num_videos = len(placeholders["video"]) + num_audios = len(mm_prompt_updates.get("audio", [])) + if num_audios != num_videos: + raise ValueError( + f"use_audio_in_video requires equal number of audio and video items, " + f"got {num_audios=}, {num_videos=}" + ) + + tokenizer = self.info.get_tokenizer() + processor = self.info.get_hf_processor() + audio_token_id = tokenizer.get_vocab()[processor.audio_token] + + result_placeholders = dict(placeholders) + audio_placeholders = [] + + # Each video is paired with one audio + for video_idx, video_placeholder in enumerate(placeholders["video"]): + # Create is_embed mask selecting only audio tokens + audio_is_embed = torch.tensor(video_placeholder.tokens) == audio_token_id + + audio_placeholder = PlaceholderFeaturesInfo( + modality="audio", + item_idx=video_idx, + start_idx=video_placeholder.start_idx, + tokens=video_placeholder.tokens, + is_embed=audio_is_embed, + ) + audio_placeholders.append(audio_placeholder) + + result_placeholders["audio"] = audio_placeholders + return result_placeholders def _get_raw_input_ids( self, @@ -1454,7 +1492,11 @@ class Qwen3OmniMoeThinkerForConditionalGeneration( ) if not len(second_per_grid_ts) and len(video_grid_thw): - second_per_grids = torch.ones(len(video_grid_thw), dtype=torch.float32) + second_per_grid_ts = 2.0 + second_per_grids = ( + torch.ones(len(video_grid_thw), dtype=torch.float32) + * second_per_grid_ts + ) else: second_per_grids = torch.tensor(second_per_grid_ts, dtype=torch.float32) From 97588c4d1231287eb380cf9fb95ec77f88479b85 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 24 Nov 2025 11:28:56 -0800 Subject: [PATCH 06/43] [Model Runner V2] Add minor clarification comments for Eagle (#29332) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu/spec_decode/eagle.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/vllm/v1/worker/gpu/spec_decode/eagle.py b/vllm/v1/worker/gpu/spec_decode/eagle.py index 0f11903e14540..59d0f313d96a2 100644 --- a/vllm/v1/worker/gpu/spec_decode/eagle.py +++ b/vllm/v1/worker/gpu/spec_decode/eagle.py @@ -65,6 +65,12 @@ class EagleSpeculator: # [num_reqs] next_prefill_tokens: torch.Tensor, ) -> torch.Tensor: + # NOTE(woosuk): To avoid CPU-GPU synchronization without CPU knowing the + # number of rejected tokens, we maintain the size of eagle's input_ids and + # hidden_states the same as the target model's. This means, we pad each + # request's query length to include any rejected positions. 
By doing so, + # we can also reuse the attention metadata (e.g., query_start_loc, + # seq_lens) of the target model. if aux_hidden_states: assert self.method == "eagle3" hidden_states = self.model.combine_hidden_states( @@ -110,6 +116,11 @@ class EagleSpeculator: # NOTE(woosuk): We must add 1 to the positions to match the Gumbel noise # used for draft and target sampling. pos = input_batch.positions[last_token_indices] + 1 + # NOTE(woosuk): For draft sampling, we only consider the temperature + # and ignore the other sampling parameters such as top_k and top_p, + # for simplicity and performance. + # While this may slightly degrade the acceptance rate, it does not + # affect the output distribution after rejection sampling. draft_tokens = gumbel_sample( logits, temperature, seed, pos, apply_temperature=True ) From 4d6afcaddccaf281385ddfa7c6078916af7d9d20 Mon Sep 17 00:00:00 2001 From: Benjamin Bartels Date: Mon, 24 Nov 2025 19:40:54 +0000 Subject: [PATCH 07/43] [CI/Build] Moves to cuda-base runtime image while retaining minimal JIT dependencies (#29270) Signed-off-by: bbartels Signed-off-by: Benjamin Bartels --- docker/Dockerfile | 16 ++++++++++++++-- .../dockerfile-stages-dependency.png | Bin 134558 -> 149377 bytes 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 1b937bbc1225e..e03b9989a190c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,8 +20,8 @@ ARG PYTHON_VERSION=3.12 # glibc version is baked into the distro, and binaries built with one glibc # version are not backwards compatible with OSes that use an earlier version. ARG BUILD_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 -# TODO: Restore to base image after FlashInfer AOT wheel fixed -ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 +# Using cuda base image with minimal dependencies necessary for JIT compilation (FlashInfer, DeepGEMM, EP kernels) +ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-base-ubuntu22.04 # By parameterizing the Deadsnakes repository URL, we allow third-party to use # their own mirror. When doing so, we don't benefit from the transparent @@ -328,6 +328,18 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ && python3 --version && python3 -m pip --version +# Install CUDA development tools and build essentials for runtime JIT compilation +# (FlashInfer, DeepGEMM, EP kernels all require compilation at runtime) +RUN CUDA_VERSION_DASH=$(echo $CUDA_VERSION | cut -d. -f1,2 | tr '.' 
'-') && \ + apt-get update -y && \ + apt-get install -y --no-install-recommends \ + cuda-nvcc-${CUDA_VERSION_DASH} \ + cuda-cudart-${CUDA_VERSION_DASH} \ + cuda-nvrtc-${CUDA_VERSION_DASH} \ + cuda-cuobjdump-${CUDA_VERSION_DASH} \ + libcublas-${CUDA_VERSION_DASH} && \ + rm -rf /var/lib/apt/lists/* + ARG PIP_INDEX_URL UV_INDEX_URL ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL ARG PYTORCH_CUDA_INDEX_BASE_URL diff --git a/docs/assets/contributing/dockerfile-stages-dependency.png b/docs/assets/contributing/dockerfile-stages-dependency.png index 57a33524a5169c8b56c7de309cdeb243ab3fa918..b327eb2151f50e4d682fe533fa57e12f0b6118c2 100644 GIT binary patch literal 149377 [base85-encoded image data omitted: regenerated dockerfile-stages-dependency.png stage-dependency diagram]
zheMjb#1Spl4l)NXNJmdapIc`Mo~ma^LdLwc+_Gf?Yg0#e2?FbIp{42VIrJSk6~9^_vegM-uTa387HhF96oI*I}po+KGyZ3O><^dm; zbt-uE$_VupayG}UTPKbVIp>3k)yej6e;^PPScXVWrayVuP||hdY<;NWz^X z=-xThVwR67QTl<|u?FH@fazz zzk$+mVXL*Lu$IYx907(pVN`Qbb7N8R!I{;U=q70gt}MhoQT7-Q`S9wMJcT%>!f`R5 z-th+M)otP=sg*Up`A0@B4kEiX3D@#8tv=n8#D6VdFAA=oK5l@KCA>8cXg`*EVX!S@ z*+89}Z-tn@9Cpv9+PzOdYs8Wla&D0keI{*=-D7EDUN=?YHf9j;Q1{vVAWjf|j|pv+DA!{ujn3hxOMje;DBa^gUyV2ll~v- zsI@#*WR|M0|9G8Vi%axc{$Aji=(()zj!#NX@$&Fc<4-2o&C@Qm0pw8HIRw`HA%d>X z1}$-(q|BJ_o%hZ1E%p=W$%ZhVDxB|@aQE)T*Nu~U1p1lZ+OQmHqb`7lsB|M4ioL&^ z$%xOxz-ZW9tyipFdvP&1!Xw8qRm(PV5Sf=M*-esZ!ixR&8y*zSwh)vxAg~ymFmhQ! z_i5Aan%-)lMd26!Iq_m*NU#X8FCvspK8yM@6z$n=Fn~QkUVSyrmIBCwnY5FDWUc@M z0r|~)W^kwwziGSIG?JfPXw!=P%CG-Kl_!>uKt{(2UhQTKmkm5!Z(t2BhuMe$kBzZP zqsk=9By0~N06wJA_%SrFm}YTewaW>CCnN04vkhU_UI3s4q#uMX0K)h+n2$&ph`mCH zg}df%c;1C#l;VHYMoX-^^$2{9G-4D|_DL}($;6-lQT)$gby1iqlgKAGH=U%v`Y;)H z;E^MbcnIoS%DYqdq`ZAQBefA7(gA435FU7IBce4?fB(gc%5d>aH9bM!jt_?r66P_p zIWx$#MZuN#cb)rl3WPcu+;(>;+DBKC7m%INIo%#yJFnOp;v=&*k56j~f(4DD44JUW zTYU1*&Og0a+)XV%uODFab!vTgJ+5CdYmYp(i-Ds2>r|vg9i^m7SR;V?BYcVxW;G3h zDL`kGs}{bxH{VxiX(g@<_>viE@$7%)0$vz%&=huGt3(@2^AUkN*b=YS8UF{WY)eZom4# zr)fD^tw1E6@c>T!d=8`VYmVk3Kh%t z?FDI{H&zH>C|42F20{i0CnTtg2F>0hEoR29OwIix-PFuu0IK)3a|2w+Y(K!!$%*LH zl}#DOEkGLjfd2_mcan#0(>E((gYCS~hRDCKK|-J?lc2 zC;qdw&6IWkZ~A=JYuB%Tz~}x9abwP0p#+Jy&zFKX<^N9wAl$I!+-jj-l9FnDp9>ML zgVN8^oCC@2dy$eF6-F4>WF?ZQ914&L6&Lv(aSJSNRrbNR&zPqmk_KPNwhcnm_Ar@h z!jm8l5ZZkkmE!oDH`maHkSV1u7tCc3BC;^ay)*ifH6XeI-=?5KzIFcUKvcex>K<%kCC|*gC~Srn)jh)E*>M z^Y^)PjJ+N)_=CfAGyDn67LWBzK*mq1u{9w{r7k~a-ms_D*P>4jxcU=pKz~jix zK?7ld+ypt)Sr~&Oqm~{8`4!k*ochk44a!pLt=L#AXZFwhCZKCRLo2Z}TPPF(Dtb@f z0J^lnfZu!C9@%OCaz{ZVFOx z%Esko8w%vB3CXc-)GpOw2M;%f& zgRcSwBOkRpVZ&h|56FstM~IW^J+k)kW)WYZ4d#DEIshm+a{lI7X=34z*;rf80#Dc%46+AhlBpC-STc4d{(jV2waJ4wBCKK;u3u7gcl#Lb3PaE{_(TCns zcm8?i-x`Sn5qE@5mIEonRhvQKqt4(0-wp?M9z_%20iXGkIX!A{xeO?_3sj=yxsv_W3q*?Qfzk~us9e4(?%_TeuyVKb~q!OxD1uE1rTas4$QNXL=G0E=>DBYRjS^%DhWq*m~ z5I?$~Vj-|ap+k42-BNgtR|L~NQQDfT6r#^0C4A+zY|K6Gfh$ z!-K)jTr?k8l_&OTJpR3`Z0(Yp*RI{q%d5KW#yT32-sja5AxMXwN`G`c3iJYPuwe-^ z%3m(r^6o?W3=Qx5RUW5j_c8rHEr6F-3tLQrbEITehesAn*I7w#H8~oJSqCV;;7`f_ z{aZVvUH{F~*C!;G_*6uVsO{NfmFOvelo^Mo1z_lZU=1Rtaw?=dXd@o`N_=dN8p5ti4WTOe|_AyCs3B3i>-cxZ?rCr({?Nx(8f7Du9d z#qwSj(J1rS7&(~zi|io5I#RQ(w!Hx55sQGA2b$>yf&UxGiyM|y#7r>Ae+z;&o}n7C%S8t5n=(OOce1CgO$m(R^U75EcwiiLvcFSX#%rD7b&%OAdnRmK+XAYnO zqQeO>$A%lXR#lCbsa{C$PTuf3jsY5mlN4_Ev`du9k!HN!p}*F8G#oRG5Naez@sA%1 zz!4U{`9ZiAXk<=KATGi3ANMFOBsp14_+DR>6b8@ujA`ez{LQ3MG$UB`SWcyOZa#(E zaTqv;PL^^#2}T#Tmm!~74EdWYbR=C4{+do^FK!wPt#IMDtuHL(E$E>l@#-SFpi1N$ zyjXQ{+X0HNC65zIcR*=@mM}kUj#x#8=Lj>K%Cel<$d^`;Cks-Jif|+{wm97;z`Ev zzI~pasq=1CI#+HbT{jIF)o%^?6iotco+Z1xxA5r{LqSux_58sX7@Z~HmhVeT zwW%f}m);t^F}ZOPNUgzEO;M_$OA=3kY`>?R+yag()?kC>+HE1-|u3}c7 z4@vW zXn1sJ9z(e3LGcAcDRO^0yrhuJwgTwq%7}G*uLa&+|NN#q-UVobe?R)epgTY4*cq|+ zbOYtFfRf9u`!ZN2z;D3hgJ44%v16uAJpn6E-`70o%f9N4#hVLviV^A|s9u2rT_Ucv z8~nF&VDH{(By)At$)bkl_Sdh{N_+gRQvSbq*Jr&43^@6%=rY?|+HE7_=?rv2f3Dc_ zp8_cO~5c=~$`>d4EFINu#ORUVk%VN#4`_TYvobPit-c_O-8Y`nUV^l(5)ZS@~ig zgRotE4WaZ;OG_&se$|g!OkAuWYq7xZqM~sqJSu#RC!bcFr>Czkq!=_-DZI|!yqn}l z@L-UkdZcI;364X_HwwlVaxV;IwN`Pq-%x}3G}5vKNgj);R3u8|vPd0v`D*;fo$&ct zcpGxwT`R3P^h7u)YZ^VjMf}Sf5)Q6307Gso%l~N?yu^FHsGI%U4Nm(-64|Ey3#~P%kHJM3Y3>%5Y{b z6@x+!Zx$Kx;X&@~9@4io^7OvrR(B+17@i(e-aHL)Dd>*tmz|N_P8$yfiJ^T(2(1`# zU-!Hwi|YA4!{~lL*O=qi_kmFLPO$9`j_X{qJlCs8wC(3%qQGH2QL<-k-WQjTpsIBI zx!B8blto7m9d>l$=`rTbu86E>ic?8jV5edfAfkps{ZxPZc4K2B^g(CtNY^iSZ;(s0 zM4^IsAIQfO0LIKbedX@fO4h;#(pers!&AK&Gv&=u_1c#v31aL(gmC#gkKSYoxWj_(R z$eIy!E 
z&`ybz3Jff6(?@+&O%HewgouWP%^`;KkXo;%{zUvjd=rq4sFKJ*IZ4BKkQ4pudkq@& zgpvi-Xr**@uReX|QYHL6V{`!-PV)|OmYsD};q%*t!eXuhu zkD>NHiXKaR_fL>tU8H!tNUrYVt}??lw}*{C?(wp!@~87-pUpqqvUjZgKIKPM8fB3q z_x@ilCeFj&T+RO&g0ROvZ#5h>1AlHI(DU>=9=xy~I~`}yMO?lLkS#lpOa5A}wPVNBQ=0Ch z<4;apcj0EqPkwip{kp?OmtHM$vT7HeaFq?H|IS?F3dYqGVzzITG&C%ARhqG}>wKa% zhC#z)c1+?5J)HnwV|gSJJ7yT-HaQ*?>Nz^2{@56&?+?=AZBND9!^4MOA_?N}4TUyHhiY&0M=)DNAGLD*E;3l}cvGEJ1<%DK2e?!4Zme}$;&-{Au$ z=f-;n%8<@JduU}n`<;DvoG?KO^uBGqgoI-5UETTfMRNy*=or%ukF&Dqa-W7+?*NcC`F#(X z{)7SzP9u=DVDljCk!o0ebA3PI8htB);m~A@9oS*^5X4cuZZIXaZAZ_}*zI}sV7I++CLCrd)El}aT``NKcgU@0Xv4T?|7KVH2J-Y}; zk#iR=ln_*Kf@sI|r4b`j@EqDp=oyBCQ^l^l+}vlFG$ADGNTU>N_Cr36BkGrP=g+?f zT^HdphrSre?fj`7y7X^LP7~E_yeK$!W%@>P8)bd__H8xwY8;F59|rfM?`|$Tn45$s zP`19{_3KOkyogQ7+!x)e-!gfkG=#QL8}a*G7q+s=c=Q$|QJwcroj?Y=*xdYRZW5j} zK@7{vgz?iUWY^pI(T(g7;b1xl+nG$XfkS_vWK_pU9NTP)n5wY29NuHzYgAej(}K#_ zgef;+c#K~|wXKcg$aQj4HV7H=Uq*(}>t^W6h=%@r{Hxhi?mBLi$>+zuBYiR~FGA7m z-*{})Ql+VBfXJqQW-u_|@w<1Hzu(-n%bW9I?5}zzfi3Timr7g_?L4cwwR_Yzllu0r ziJ3lb%bDU-J|$<7YgXScbpe@D zWp3flmefZFvXY=s;^^z|Q7)f8ru-;${>;@EzX-x`>_=v_ilV{&(6UG`Rb z%3kvk6w=4!lbLImwHZ-bq{->;-=O;L5Z@^?G%U;}`?Thw0?lE=W={gu%bCQvm{i*s zkomi2`NbhMvn}=Yeak~w*t~=%R4dwrbP2C>Teel$(&n!{#)FA$2Eb92`^udc;n1P$ zK#ONTaZxqEw{BX_uEHDZF6RFsUGL-IPO6t#7*7&cw3(b~Xh9KsDBjm=%U5BE-RfDU zCc1;y^qQ{bYuYFOr)vKuA-41G#{~sT4%Hmdy-f)wef2$eXa)}6Hr$+MubW8-#&L;Q z58#L{X5G>nPcojaQRC;YkMkCjQ+hQ1!T87F4Y9jNJ<07Qw`|x^BPO3Kv>5_Da=M_g ziGGcUnrc!)vwx407+O9Jb@Gh{rQC_9G+R=_-^1`0{Zss)&1;e-8?f3qXzmQgbR28_ z;}&3c%0^wBKaa=6#9ZcsP4y1Z9m%=*Lz6=0(b3CQgn8b2B%E>d4rdTRrQ(aYeLG#X z2B29?WwM%B)<69}Z{G+4(EI9T*VoKnjOAPSY)<(o+W9ji85odSmm4t?)!^vi!^5Jj zl9L=aM488&_{O0Py6M*R_x5Ah()KfZfZ!Qz>10Ar>UtKP53U$z(1#x;F-QR zsg+Mejnrh2zYW4)2uZbQr!rjS1C#F?t44&>hM0Ii8Imb|1 z;Pj84IB_X{BO-vykWX4goS*2>ZozY#DnH}=^_WGgSeD2ZIB=8Z!&3!~Uw0LB9Fp$vX71ey->sUanPYL7wTq_kafJ2oWk+S&O85wkz@_4EQUpPp0B1-AS~rSN_%U$(u0lPagL?EDQ!q1D$JO{|8;J|=jlf=4QK(PVOrs4?$zId7)+l!y@{x^6!3Td? zmdRZrkxJZAEhVxU6jGo1;$E25#D+*kL*lWGp{0>K)(v($tqHDk2*60Xf@|t1f^Pp5{3OfMQnASHV9=6~PMf&KO%rwr#4713*kK4n#z2-{sEM)O# zlg8x#S#)`sC5&cfdXW1Q$xK2`sYAZ;n?sYvwkk;93VHv2ye;a}C1%6UmsD`g2wqn7 zPw<~rlN5db=2aQxwkwUxih<|MM=Q9pvktb{bYeYTn-*AfVE~-J(CU+`i(Ww&d zh8Bbzd4WwccY*XR(#^|%YBp1wJrkjQEO+t>GI}kbxgLN_=C6zL^UZ!+TwZ#sD{xN# zL;qSewRKLOKd&fMucDQhmbypaDMHuK`p0CZuO^H-oY{TQAb&va)<0_aSBMVM+5Ap? 
zDc-vq%yQ}m*T7AmSH2PRomuvgOaT+^B=p)zI1Xf6qo3{4Z1^@FVUH}Rj$CeDUS)0N zDwxUst_LMK4W!Jc91Ag=0=;bSx8K{_cN#GWqHmV%+&ObfsNjNbHUgjEox2bDg>==d z13`@YpuJtSF&Wb447ZThGzEpWB7xzGKJs+@jkvg;{;6;8?ObGHGuHt>*v<@L40&mc zF7GeeLX|QfVUh$P&?iY6toKsbqKjfmWM3x!6gcn_?YVHm?|}ja2X~X+nfQxznfiGm zp#1sY%>yRb4lzB^c2l}z(Y>9@c14Jsdq25rLIL0bz-MmTP+L2MEPrLGXBl^{*oXyF zybGqM|JqX5SBzB;=~IZUB50TX$6Xj7U!(l-uWTjy9j|RJGYl7VOKJUFY$HZNKvqas z0Kv->wsT`e2>*TFLcs|gObXM~KQ;@68wj6P1F4cq!-(?M*tl-9i{1 zNxIj`!Avg{x!8RYL62?XbStTT`iyN~nnt}0bDjGK99eM2R7YRVM*rx@s6SqYgX_vZ zv%~d!<(JYt(-E9PhXR=VS$sJGFWg%>!2_Cp1+i3XC+XX8PSsPht!_Gs&i`UW@{KDN*@RXZgSTO zu-EuBH;4e-`hTcgv^&ahRaDGtcmZ7 zZg+4tF-Fk2{&y6IIoB2rSEY}77Rv8|IduX)c$vWZHQ0ZHR_%Zn(qh}_hC#m(Cv>_+ zjymD{(zi2BP7!sypZ25H&2xTt67S^lX%zsD69#*=T;JXrhJ4EuvW1+x+c#3Ux2LgY zCQcAiiAB87ARf{jasJ7&u|AUFn~wQe`V#As>2CEhq7cGAbAg7(1B9r&zPlgO zTaFA*09*DiT)k#Z3iY`}4T09ZRP*kD5@yqW2%d{_!8Ymz1b`*Wk0EBD!(4FYV~Rl5|fTx5bHrCjO~h224D5__m@@lR~e?6 zuFe3t30Ey-(b}s|o3`$ovSzm*d{>=q50=%HE$HWP>cjLqyZn&scov$ZWph49+ZorB zHb{wT9640y8;$<{{yJ{JmBCM?MdOu()uv7h)l;*b~#~0G<}Y!P`0|!EhEf8m2*65g}uFffc-jZdI0Rb&qkBA zoXd}*d&r9`+gGX8x`-!L=W6>BMvH`yI;#m~YxbfTVNZLcvl)tln+6F1P~X15rR5*h z`OKbVwm)|Oqr|5j7%gliRi~(!TQmg7q&3LXaw!Uz0-koj5$BET)*WC+NU2_Gq?MH( z;8{u1?)6jM#TB!~_%nGxz>&*WEM|SdUMoU7?~rIOQ3yLmR&xjvu44t*uD<@6;Un)f zU8xF(MtB53;2|(shHNEJ@IT{(RTaZqurxa%?Kj@W19@AT6e`AwrUt9V2DSf22&>diLqxP z+yvUO-oe3|XGa#+d$?spw^@dUU1>E`h+lNs!ninl9@|?YdPKm}hxkn<`x;EvtPMkB zwn6wCB<^bL&my)Tb1K~Tg3DO~(DO^lC5eX_tyLp!;ECW%@A=WVXX`CB>sCrA`(}~w zOx&=D1-+pc?LxgL1r-Q=(y0Mwn_YI0*gI&G3$#!sE z%4Eq;GPV_HEc@RQoe8 z$-4&e$m%X#lVt*fQWJS&I34XHUDR|d)4jt-C{3j&&CZ~tbLY-78=^0z91jcg=WV4O zxJ_>K=~cSe!lGjv3(pF6U_@ySRE1vBD@#g@m>~5V*7o*N{F?QBH6~3sG;`t))R#5J zsFTck8GVjJ%FI#P^X{O{h7AX)&}npOGjoI1lfR2!XifM(EkIorN;%~v`6Czb?2!K- zv8=5N=oKgR!L5>7Ek|kvP0E3;eiRkbdZ8|-v&lPqRp-2{o9$$F2~F=j-`52N{b>bB z(eVQU5krkHTT~AX-0@BbrFAMW-XRAH8~xa*yEvRfaaDc5Df4_;ht65iU2b7w$519_ zNTnv-(T`i?r&pM&B-0S>`;~MrWv?6vb8e>Z=lYu@DtfZNbti*wcE<{W63+nBGp1+V zhySY@RXl6;aA?t^@dZi&cO`Qy8Ild8Gkku@^P_*)UcU3CJNdwU__tQ2N`XMfGO%l@ zyua}UgMKlG2K4W*a!Dl@Dm+uV#{>p?_LZ5xV8L8lTiZR#UV@q6M{gp9y}G*k5Xf|% zD~^PDdRiv>`ua)9%uVTCd&hxtnB_S3;|KT4NXMRKygUi&(E37dOzFSV8Y%GW(t@|d z&OC|6fu0^~2Zt_PBA7XLg^#B)8uiRV8yTkJ{BcV4ecQEd+p)@8xbkr$^lQ2C0_a55 zJK}MR8&6CzwrM72wGp-jW~&c$x2->DNef|2z3Q&)V7b$(k;D`0@A3gl2jx6{YM6VI z=qz*N{AknYC$7RR@ICjaWYdrB3>mUL6*@Fcl?(Q2tce{Ak4^m_!)U zWMpNPl#BqRT&daR>z*2)s$bb-_%VXnCoVrL}heo}fpwFof zaya=bjXkSC5Pw!xwF#J}mViJg^G8e#15MVF%$QoV!)3cO;P`zOLU`*5XCqkc1dQC^ zUiOgN({?uyrFt5on9<+LW?OE(srIhVFP};#f6nQWnQk<&ILW;^DOb8ry}G;1gzt%^ zH<@M0YsY6O)_*Uv9;UP?=>o_Kux#@6d*mHNw49Xv=$X$=PaT(2i{9r_THq!Q)i zgFcbFh!(GBmMESA7rWw6pwSt4Q^;~epY)LJ;fNXJ&Tp0zod9*eZ2I==hkmk?=b%fg z+1CpjntczvTleLv`e~y+MK7zw9nT)nYB;<=!+1$&t>Z8_XVA@gA3D@QA}*#eR!Ps* zUtVD|2Yooa;AfjnR&9P&m3AI$xfl|`-%Unt_SRJizX0|{S5Rx6JQuAV*N;}5?yN5-<1Cm9H@!6cfj zaiZDBL?iB(h!mrdLF(uKIvX$hsI$e_WbLBe6^?Wi6u<8sI;(0j#&pv^uXaFR5E<_IQWGPO=TE4X&d4 zu0$yV#3WepI*rr5m)N|$52Q~0{h2E%!hUc48V3h|^3@gS^00N>5BUXVV`ZDJY%=%c z4`1SR#5N`a8p`J<{`OqVx{l84%QoOL+gw| z%0kH^NMYATHc?v%I__D(x>6J8IM^;7YPv{Phc{Au@|7xF8XZ#?iF4H_|MKSeuw)Ca z$DB8>shDjIN}hkLy~IUv{_%gtp$jAI9@p>;1;B94IdRgKiDSf6Z`12%w)PAMl)0R06*`8EIni31f#&b9ef-7Jf{{4k*h9M>EHUaSVWhmW? 
zv2AW&)oTeKf#EU-6b#)ArZX>O4i?yep6w;Kt+FVc5jF2LPDZpPstKA+wMZAX^xs#a zqEKKe^}J^FUxL{~W)z3GF5#O9+al64lz~f<H%b?j2(OM+G}3c0!AtuOKyNyeZ=M29cx zolKUUUr@RC)Kp)$8&QKVA3T1%9~;tx2;U~7@roD)I%VkMO0DFT%K+a*+lNF8$uY6CSAZp#8I+giTxYLoXCCOgQqq|MY1;dIGm+X7{6<^FEEYsvJK7=E~|c z34-`uQ2afcK7IKjMG*s7^pG}GtKcpO_UNE7H0exE*s){B-E@%%E*2K;fcd0$yCI-b zXpiK6c?6%WhUDjb8{@1JcPrnrE`&YtQ?E&wdReFS)J1?^#~OPYR~`!r5@ytr-GPBU zqKk0?dIzc|LB!HC=oaq$p+wHHV|!qur@H&ixdpjhQY$qadPgmKVHNAVcbxC4u!3(^ zV=ZUt;)HQKDM1GWc3!rI&Sdh?ZF^~aJA+VFMW2vcU_GL>DT3X| z4bmQ>yvd_Y&DPJ+_7;~t*U!{^m>qr~eJC)!Xc3bLqUSyp-;E`sj4ZjER;*kpe_2xVw{CN0 zp~|0pn;}QFJ8DHl^@pX43GsF&PXFy6VW-2p+Fi3+KJ9!4BfSBn>tD;yVId;ioY>w z`U<}tadY${Spm`g`+11Wy-!a&Mlec!oT}KP2eS*_z7@u4IV+$2(hCLaYcKJW(`>re z`(;yKizQ2>4xKjjrHrnuT;gz5y=jC^_?qPhulBB~%%XgSZ0}S7irjzXp1jml9b|PX zR9Iaq`(jF&Li-PJu@E=WquW&shJfbwcHt@lTrf#;IMA?8_`eT7bk1ry;zI5xZ8W8^ zQ69sCT|FlpH`q_Km97vi2@-SyU13PWr>m!e|l_(Mhn!M&IF5SgFllVH4gXv?Q+W0h6pVB8zpB*ek{nR z_ayD(i$9>YwG}|muNrU8P^Vk;$!)zOwTX~mHh5lGa&yyCUEOw@lIM@Y&XQ(S` zA>g-vdu42=siM(;@&-SO$qyVB(I3wpe*C^(s>!>V{vw<2aYMF`6oa!w3G{43;mw%x zTh7*^+z@OA`Im1~5-aHHmRwsRZM^iPPRD;hxj4t=)Ji=r4PwN2PZE=Hj>Mwh;nC=@ zx$sBE<22(%RwWU=E!_tX?h2cW>rR2u&|LunySg-x(q)8;aWgGEIj+NLkCJi_J8+oFmld}nr4i{BX0gZXSucE zNCd|+BFloM3|tmw@MZJKkdWc_|23B{(pjWhFCz;U_=rn4;KT_jVZ89_+EA;AGl>YD zcs=U(c6pG&bq`;{f|Oob*epfy-ctn}*0)YTrCNvij10RbCjj zMmAF5XO1l^Ejn81TFeG}^X_QIA#qxY{W6`s5hXG(6HP1*A%Rxwp& zyQvyvp6$ykVb6|ct`7PX%~+?eFLfMfHqk5e^zxeb@rqtGi*+$LH9ufnTe}inGPXxP}{xzZ-U`J~Zk0G+kKVdAZZUBhPDk1x!oMeet3< zp#+8S{?ZEZS%1GjyP%})5dAm-)*~exYHCdR7;KpwKf?UoU4}xPt6+%6SHu4a}#zSlYFD2L{(N=DI}vptKE zf)k2b_xq)~z4dKrHQk{fuTNQk5RQ`Na_niiNj`yMeDJ8AT+Dlkz?~|HOfa;1zQcGT z>Q;cJVz^Rpyg<7D{d_*g*jKgo)3HBl@tWnI=A?^R8j*{yUwGC8Ls|)b@E-J5a>l80 z{SHma{a!b^4W15$m{_KmP4|$Z!*(#F@T^Nyg{Do`lETm^Y08{bx5Qs=X5yeR`oAb7~&X0aB^1tZd zZKS*-FRl0kk$_~Cp>@B;{;r#z$xMX42=M6p9(g&*#uhDEIy4u~`V#gjEw|-~ch;ao zMZ~cW?b8Y%)F9P4`}gl>9){kkWLWM_K50bDRI=KM8)!IdjpqOovB3hN1eQ}sH9b<^ z;MPhqdw=IWfQ8T4U(x3($Sg)HICSYZPCIwfo$2lwRtfVKEI9W2l@Jrd0ZTTGIn{+B zu+gi&oLHGZxlVt7mhL_EsHc?F6V1nxb^8BIl*|NI%U4Ql*}#9>#jvRoch(sGZYIUh z`iS)VrOI0|GQgt{Dt%#tC8*jR^UTpMLORN=3#HTn_J^%K8?HD0-60imK$lxIA<3!y z{dr+<5q_bETvz{P@F2-4fwz0P(JsnNSr?` z5VtW_&FhMZa^3yR`N{yv*H9T6$r z!Pt7q&Not4Rh8y5n--#TvStfu6+U{@mak`Ca7+ErB9+ngJ4-Wq_!pMSZWx}$By$0X z+;BUmk?vX9H%!BA$%lmLlAP&R@C5Vz6rH)0qS0~M#EG(0+|Lt!KxzvM3<+fKiK}0f zd0%7eRXvqAML*n%;L=v}A1Hc_nG`g*pMSZE20*PXPLn9EVTiR*2j(`xYT$>VvnP68 zR338GkdTm$6>x{L(7}sdH*9USC|!To1{irGpx1CW z9uke5RyNJxGj;~ujc-1Im+gU42!#m^<_CI)cYe?Qq7!m1%P+1bM!#?(EH9>e;{u9v zEwfytamKf%7XgeSAf60JiRoPtlX5;&SK&_}2{`E|pmx1ZPlR}yWKYPvL2^XjeuTbL z>Yk*cJ~KDi#DQ6MtRVGY$-}cheVA(QJx?FnAn+Y&eQxX)m!?|sYHigSjnKR5+rNiq zHBrmcX*!E>C=yXfVv%~uJpK6%`f_~DKNld~&1zdNBo&}CJr&<1b>qgts>sx0)I4K8 zCv|TC#M0jnxAh7a37RR)$nnpgq_XXpP1h1D!MuQ6QYd@YN!gy&cmL4&%1c2TuamK72>rVM9<#}Z*`-a z3!J0_bCe?fZ1yXkE$$M6mvFvOX+WLR6G^C6A;V5s!zV9AKXf@-pJ#^p`pC`iW|?U* zzgTfpT!bzCo47OHDFA5HNbx-bS|!Z&a(8i5dX1VR{A%cuTv)Na00={9Uph#H5esoB z_s?j0ZvXuR4n~NrYvH(+ewL}rpBqQ2LUYuG@RdN82-F0L2d9^MCd{Y5ZbMq1kOETY%|Hkh?qy6 zBwS81isuk^MTU(J3k&N4*cLwX5tp|S(uSDtZTLEYSH}8OHFF#Nx9EYGe!64+(Z_i3 z`;j0^S8#<1sZ3> zWH=sFd163zbnOq?z&EbEe!=&Ay$oT4iHZy*YCrS)rCCI|o+_Dk^^>7{_0nDFOHERE z4~L!U)V^IiYx?fECuZSQK-?840b6rHs)IXg+1slCckIJBXx7K9S-z#cuIzr3`SHiX z>Zi|s*Cl1-0nh-t&USY1(6+6x84_}T(WxNTitx%)dB|CN|M{~I*QlUZh@3CKT(>$_ zCOU6Epy_BIu(O9X9V)f>?KlD@(Qb{{ZvamF4~S1f%_^+RXKsiHd;ap4xV@|eO4UH~ z100MB`VmhSkYVS=>=ax~sKH~bj|S0|>^ zQA~=a252$ipigt98hy_TpIhZg?cJXsSO1fC4&TxD8|vzYq98@&lYq{`G3R+!mcGq6 zSIQP)KNeo`@uR1BEeKmKH+U=2FM@vy#kI6w-?3VoTtzQC*|hSq$az!*pfC#V7v{yp 
zNs9pw7T4Ew(Woo!CAkmQuST~ayT3gzBDyc|A{H#92$wUzyS!lhcy8*n)aM7u_JV2%#kd_D zvhS2l3g{-pwIHUQ2JO1|Pemjm$1bLKclxk^c9us#2>-YvKT$PMDfl#*3)%#a!kInX z1|l8WE#Ph_Qpu)XLutv{{H@@IG{H*H^q(Su;g{)7ibfM8{FC`q`Wi1t;&b4~=^_#y z&4M3?kQV(s0ARQs4G_q8{r0afx?rYhU$b~wy6Th1kFPPVhPLKT?kB0t=%d^NtS=4k z0BJ1jl)7N~$lA+KoR~i(*8U*^YI0v4j?Y-E16#L1u#J36M@Pr|JOK|EH&Sxv-{n0p zH0y}8_Ag5$*-edLfBDL7GAHX+Wk@A39+G2krLo{YAtc3_WH-|YJ&TmH60W9IM%cV7 zpQ{DFZ`#*y^_QbMbr(R=Zh>BANr&;$C=%^b zLX{t|rI^u9$f9li<}uAQ``XG>De#|56>Hb8_d{mC0$j0wuPAB@5B5Riz0;!O2|=}p zMSQFg71wF*!*yeFQ4vfb7H((b?wFtA8w%2xW)&f<3I1dJXF9E>ofrZ#%hffpps}!^ zAccQVy|8GB(*<&hx=U{Wm%3Ah^oTjdsP|6LV>i+{AAQ8Yv73j+k!Wcl{4{N9reuw! zRXe68Is-Ya^HtR5&YadRO1b)B|Sdp)y5ZR086%PnbC@75rG|v>Y6*SX}z` z!?#Uyn<3XO{6~$>DPrj=(uSa*nfCiZHmSPfFO-2Vr!Vr&MO0=tQb^bos=*x|kxW0oqT4}&bj||< zQXE|r?2?ht{I@7J4@E4L%;9)9jP=-tVkURoA?(NKiP($(eecNDA9zP6A$`3G2XNqp z^kUnys54Lzk3doFL*{UA&_y98>i|H4h4g-wc!0uwP=BEfxpwoWdBX{XTu;22q79Cc zcgzsDD9{JE{>+M9JXtSxGs2$!_6kq!^1VJUp;Vii{l-&@jywDY^5nQlGRWp{#*g>nMOx8jNKB*I-1!b%f)3 zQZ0KmtdB}refJ*5m8Y^t(>Zj+uH^?8(YN1^#Mr(jC(&hd z_=!@=BH^*&FBa;`@I5^wVo60S45AAATQQdRrtWI27Oe8D&(ljNfLBpjJ6C^^Wom82 z*xnM#n^`#3C{zdE!aGOgeV?00Stj=7&^_Qzef~83#iuDRs#mp@@VLZH2U>s8DcMEb zU~pl+zFi{akr%^_`EQQd{bqGOsRs;t^vtT5-{I;lJwUo^U436n?%75Jnjl9ES8CsN zQvb1zVY9*_LI|u$>P7Si4EnnR_U^rYy=te~_jM?%MR;T%ln^=Cg`G&Bt$_?Ve_|M#4uvby z0Qn^o=lXKDti%dajL8LZ75y+bd^F^Iv7;05l5}<^@@{B=%TD>R1iG_?*pAZD57}9w zEtV1%we3Sf10xH>&<6T$o?+u}EITAhXgavoh5QR2BB3~#sN~q&tbUhODWj7gi3VCA z@AnkLA_u{F7Ws&h>l%U1At5;&dilKH=xjM~SdssOiFyivgyt3O%S3yx$Pv&GqL%G| zte~7EBJkvyjocBa!^3%D^-ZN6sRq1|PNXo-1HI*bCdrYX)L8y#Wya?eOlPRNQ){Mm zij0YVU~5hv*Vq{_Q0(LmnI8%s`rS(d?~1m-g~BG=5rgxgJ@V*}0A-oNS@$NoM82I% zMlD3{bmsLzL&V9RNq)_ZX$1Gfbd5HYb|ZV|Cy6qa-dhN&5^@cZU;y#?bMJ)4$JR6PzFM(y;~`;^PXH31n}rk7doeN^LS7LD z?aJ6GKE9d`mtOaD>ntLpfIGxV7P%a|{XM`jxu_>ki|JNkz65bq*~(^%jYyh+a#ny< zBWscqWIg4PWG3=Hp*T>VHfO;emg--o_qgDu!vV5xY3WmU?%o}s9&uu}%ZGqQ&Q(+4g zIgi{6;3}!ci74*HwIt9}VciD>3L-0Sp)!M6Ms^w=o1IT%Wj{-2t zBW{Ksefo4D|2K^^YK4rC{Wgt~<6Qpqp|hF9J6QNtiN`Mz48E+#xmPtV2dgXlzJ`aU z)up8f!t*{9B@?O^k=)}P-vRCumA&vy7DK|t-I4z_4H!zWy|52LdM7qLP}5bbDo(jxqo`LH>MR}uS10__kNhbj7@I?}39 z0NV}4b^F9W<7c=g!Or>{7{(yM-tQ4w@#U)&CyrA{)ejXD(w3=dpZj-XY3OVGuzKgG zD~(WRE?(M_GekZE;jR7u`HAS3O9_XE03UM|VYY?Q`S)htoqhB|O^0Lo>sd(q>wal5 zc<}%8qq`vP`t`BQ$O#wvF|fM6YXAQiA2(s)h%iAQ4eH=l46HT%{yx$>Fmv_( zu6y`DrM$~F|L;#Cnm^zJK}2mO;V#|>8{DEy&Yf(j%+NIPGhe~1BtAY~W&>El=TUuI z{#ktjeeta&443LWuv4p^D_pf_V?hs%qctor2&7X!m9I^0%pCkO#)7^pM&4jXI zWWAF6;FiHPyl(v~QF(|;R&t-$0N~0yM2MPS0)mMe(~za6K)OdfOblg#F~;6wu;U$}4pY znX*}l>RKWJ^4v|&l{ktKDPlP4`$_4GaLJddJLmrI<(7p!(reik?S(ITi^>dT!1`j_ zjuKj7aKKZQ+$R$nDjpK+CcD+WsZM$Ko2uIYax8SqHfNb!0lb8Zh~x~=N(i3d|K2he z%Y^sa-c@hn_slqsPECJZB*LUnWO{8{hP--mj$1x}X*og;F&Tq)w~6*1LLa3r-&#VL z#9F(PgbX1wes>g z!U>}CU6NZ1I}thj?xv4Y07`9yCUg(Fx0YxXk@85vu;s;6w!m6z1p|W2Z8LZFFK9`n zHyS&BN46R^R6Zi&JB2|?-JAUUd~v@1{m3Q}s-+GryJ4Hty1AjK`QLKgO#J!*?VFa( z8eY~0#gx+j?Io}O_q73vjj=J7>}JZR^}eBjG?Cqh_7Hx8;+(ZClTJP@aGr%|=Ekj1 zXjv})%X7@AQC-Nhdlx_v?~fgOk@4ql>r1a5+E{q=Ny}k07B&iETB<}$LZj6<|GxON z#?~K#g*gBZZVTzrLr^g>DVNOe*|YZnp_C;L(6vj%DcE$wLSEkF3-6w=t)joA80c;; zNMXyjYx&~F@0W10k!7_SGI`HMf`3bp?)-rEXOQ4ll4S{wt(dIR@=tw*XOc)6B)gFs zpP>ozUqqK0a9F+N9Ayk42?YaPTdtX+%mltAkE^2}QrB*&ej5bJacH_`w7rBF#bE^SJj#hM{603RE_rTf1{G~Gg=JPY6~ zDnE%Bkt;+2d(LuRB1%{mF{V2JZ7Vi+?Df{r0^_z={J-^IL-F&9>4Or{@g33WHAa zhOu3E1$WYYTLMGm;%h5fwVY>0r;Y0MZ>fmQXMg>CKO5KKE)tQv=2S!O)~g4TtWd|b zk%-PvM&*bHZfwl?)vnrrD+(eXK4sua#ee%;K1+Gpw1(!`?PWO=i+q~XtzNJ0u{fZ+ z%=|)+A&>k`^o$xd?K|#km)?2F*O7N@?QUH9YU^c^m(!(sU9`=Te8acV+kD@sCCE?k z=pCb|8{Bqi(3{d4uZ>sx+oc7)S#Y%MLugZAw%eF7Wt+P`xmQ!|BJQ063}_WTU(eRa 
zbE|o~)=@>WI|m-@97sxZLV^Y5VLcd@86FmASG5A3l}HjEQ@_Qd{?f}WYWg#Iut!Hp zfWpVp(&4mUqNzRV<>e&?OA##l4U(3uxQ0diOj`4gDcoRKG$O2vPzf`uAL)gP7u&tg z#o5^w%kZbC)BOirB=_wb(owP^J~45+KTLq8X0Huk*)r-aZ$0xy9PriJ7W1ARjJfDy z*j5rLa*nXnU0x5vDNZBLoVj0&x$H0-8=KEO-R(_6N@h~rR&u8QS>BhQ;W?z;6EPRl z6Uj{Vqh1U8hVqW*s96?MCw^wd0eqG`iMpYjDPWd$51GM&aHsC#N2fLK^ z5N;JgM~)u7fbHynmY2Yh-a@nS_hAJ)lA|vFpRSHIyZDnWOr-7^v| z8mR&_d4>K&^#$+$7VqmeVN1EuzJ;7l2k-*%mjkXE_eoVBuc- zP)3RumP@h@nFtlv_w7eJD*&k@_m?U82*FrXtz? z{q1?kk`-qbWtE0KiinVB^x9k;4@~=Q(`s4e_JsI6K({c>=8t3C|9_Od2UJw&7WRMC z7!x&m6MHviP*AL(0@lQ696+Sm5W!x+0thNtAjyqhyN=iptRT&uR7+{TebC{<<@tm~RzbE3l8^r&~swYCzSCW9l@1PE`JNaBU?ri7RGi zmQhY36EU0DwNRA3eLVZW4GrF%1d?7v;OQg+Ny29PJjPH?33`2kdfp( z_+ZV7vipy=4IeDuHMH)g?c0M%l+#}>QX^RW@)R6QQj?pR(-ofrYfH<$NNUa|#nJ?l zOvcVX6HWo{>n?TVciC9%_UA~8rm%ssA3xrQ!wMq;k%w*TBKt@tw$S5jrXj^IrW^St zpy%DmijUhu(hZN?q~SP9O(1ZtHL;G)TM6&8f#NIXCi3O=!^b z++LopnWp`+H$T_+SA6oVL5Y{YOP`gn>WQ6xb2+Pw7?WG`U4_IO8k!~z`~eKl4hEg) z7nGP#bBiY*(;3FS4^Eh+=IN_fN5CAyLHTAx4CG6E>h~Bj#F0lDuz&ylbsPS0D`03( zSJc#CS8Bu@<%>?qB<&70I%k%dQ=kWY${FjIn}(f<8vAl)Z<9ibMn5iS)Ue zKuA*%#MQ72w2!RW$aMcJr6+M_b5N+(1afrF7bTe`Sj#!^3G3d!|2XdK*ocko4+Ao^ zYF97&>5;RqlE6TkK@?ZtJ2vIdSLaI{5Wh&0?Qufpq!qu)xIZDYh}H>M*du;#qP=!o zGl8mq{`q&+92}b1zW#?une8H7_MJQx1sW{?d{k7FcD4Tk3F^vWpOA^1^&N6ZJ(t>8 zRo`5-zsAx%gFD1Rd>amcDrGSv?ZmtEpD zyV+?VyW-sB>z031iq}YpR=K($a#KT5>x6h?*Jspn_ zCJbocPrL}#v~}Htu1DD4B#B!%yPdR$U!Fqz?m-i0d_2@q@o4}H#S(Npq4q2gK?(rg z!|En@*{(OUC$%D$GCz2;tY@NAdI^5lpM!A>*D$;iB&9O;q&PH z((rgXtZ!v7;v%}V*aRzw`(pWBeg~SH!%cuh1{B(%U@spCM$vLd!y`o~Bst-5?9O{d z6xv}R4N**G+yZ~6xCjuw4~X4~igbcA+xa#H>89=5uNdac-kS&&$Nkd_Tn1Un)$7;0 z!J+HCMNK6wq29#`Z*OZWvz!?d-ch|~+r4={tyPb+0OR0-~rHR)5yM|w+d!*DL58fSczym&Md z;#T4YmnAz7XxnJU^yzq~oBYf?sYgAtbj4&fFica(-&^9I4m^%J>9b|A-^#@(4Gmbi z^GK0v0?{tX!%Fuv`d--^_Tl+~jYW5-ZOjF>O>hPiNT!z}gjn9SzM*2Pl$MtI8O++? 
zu%dK^m_W~RF^&Mo(#u>Yxt8K%dZn*U!k(8@q!=slgJ&&G6&K(H3I1Lj=Nxo$j@)a_ zCB%#}S@>+7e)F{ysMdzx*o9~Avn$@~#qsN?@l2lli>(UtsNEpw3mUg=lv6BaR#8k~ z&H&;qvnT(egzR4N@$GNtQP1MC62@eY>fgUVl}U4hnYS!qP~BYi2%uRH#i4$?M$w&n z4}5)$;|M)9B3dAd+4>#;BvwL=Eu5=e7UgqyT4trp*0n31e-nybNAVyMHHRI8S8TO$ zV8MTg_}o*AIc##q35t7eFL@2>BqN~Y)XJ)wxIX!JVKgKc#(Eyj*}c>P!ob&^al~8K zIh*+>Wp^o_BiI&vD%mp8}TsO(}x*6-S#SRVODnQPsWtUj-=QM6cq!px;BZ5G4xJ$(e_&Ub1^{#(g=c?rdLKQ4A*|t8ib!jJs958$*}!HW=nU+s#%}GT zS>Wn0*cd&|P*@+eXYeGP{y4Sc>v^c;$TcD?!5R;hYi!uicjLy5+G~uaURYR|%67y> zPck(#vl%<~PTy(g&z&=52pFh=8W!!>fpxKZzh-xLJs1=;hq(b~PF(}N4_I*F;UU0- z=Zvg5ms=`H2?G)qo3(-p6H2GX!=%(;Kd!9zWTw!8m|_L%PNBb45Tu{6eekv7s}J#x1!9m~ zhU|9o$Ai}8OCKVGCJ%+EQxo|2-@h>L+qgl%qO+6Wd_*}VkbBICu;WK((u%J0-Q7=- zjrwONAaJt%^91c34ZT`Ij!U>b^W;u)vKSW-20wH$9G1T9`||GbVH_k0y{MOM4G5U$ zK2Qm9lhhKk?3M`hy0HUl)` z*v|Re?!c2i8&s-2G(5f@S^wvA?Ruk}npZ;iqqRPsp{f&(`|#^E|>na(a$_f&QvT?jE)(FI#;$ez%#k(K)xE#hF>Rc1&^U zL?X+SlZ6+KmOogSsMf4tP66W)egn5ywBmvnmgLVZwTEWEEu0lD*YfSRR^!L-z}yFk z0mn_>k?08#jFahy;QEITKESF)PMCj{*N9cxFC_|{_}InBck|}Q`W~ITba94h-Sy>5 zF!a#`5=IRID&niHc`4S~_q#Z+C^`Emm&)x{)hN{dz={kolf1@B&?J>jYUd%so zHszwXHu=t2+@w&{Rb zp$1qG0^=U06-knP9NzxNAAjcR>+{rMQlKq5+g)G2y~kKv+dx=)6ZD(2kJDei`{BB5 zb*bK_&mBuT)vRgLr%$jodo}wjfEE7rgAAHFnKI{Ox-a^o#4?WUUX$3;*#XGM*LUtb zf+(nmxi$+`ZrE8_ZAN_mDCXWw!!`*9!I85_ZZcfPLDGja=EQh?)__$A2!w&PC>RXY`FV9k;smTD*~QBTQOOhX9*}PV(wPR>$h@83e(T1)l44AW)fSJ3&MZHKWnKEtyO+}UcWrPxG*ki%Ne?weKs2)v zq7a~jAib4{>Na0HTrR-7Jc7KLrH5+;&apForUd<5uwh3%Md{V@%k4*6Q4c$D|E2t- z&*di?3U}y2TqIxZ-(?Z1a5t`Bzdn4m_RD=#EXZP~+x!3G!J>Q~4(A{Yu6+g2Wf_MB zFlE@)R^kwNDWTL%w;A=IV+nA^)ZE;buF7oKn6d;hif;uts3@6Zn>BAv1hSlOPiQiL zuq8a{;fi7+9i}I5GCy)mU!@ITn-njbVAIa33oFC6-ywdl8)fW~L@_bwltT5d^eXP;X_MAEk zDql8-@9(GD>$P1`x32V3Z51g$^#{N@L&yS^7cif5bBYK8zRq|`8F$#rN3@3zU^sSe zKW7FstXZ?BkL&>5VPBR%Mi;i1jh&s}+usduQ?H)=RYxN`&ug4Q$9m0ss zc}#Au7*UtCUo7Cz;ZVzS^*!#rT*RXoMamJB0G(}|y}dtq8$Vc^#8qdsEAUYUfzpa- zO%H_I;>F@tnl))Mt-|`bND%Mj6h?}{7JRX2>FO--RY~*WxPKl3@sqpu%bV2ra$t#Y zOeSC;c0eLu=mq*78!leFC=8-4ae7)atX;cs+W?s9lPV0Z zPeG20QPVbU!U<$eH8QY#eY*j7DxSGZA^0PdHU03^u$~v*|3W`VPgi?D88VQ=e7_h# zMEbQSZ^iFlPM+=X|0600N+>NomEgGhZf<^ler4e2dsy&7DI4gh*&L}DcL0_tf`zk< z2f&^4y*l+a6{iTqg_<>MHp$=`6mH3EDoj3I8x2R+SB6H6__E+(b*MUkhvC`qxUdn% z%vtdLQZFwiI{!W4Io^Bya-u=mGqZFE;Pc zc%eCQ2^kra2)^YK%*}n{rfbh!YNIb!H{{6wKbId<+^XN)9fLo<8KzE>COvY_3ajUn zZ%;EJsV;UOZV2W4>!xkfs8EZ!n|V@X&8;b;+50zDe4MJ+K64E@nTc5dzet$Nq4|*d zwtf5dYu2sfV%@fxpvnQz`|9r$xkbhP|FQ8}cBUVK2y0uaQz}IQTkG|KtW-1!@m62o zK%Rd4r!0PYwaI($0oyYTTsR;o0e_^+aJUp>@?vX=k+ZUzVNr+`-ptSj$v$RAZ5v^P z39i-uv+FEfKfbx#*p6ZL9Cj4b4@=T(aP9KtVbWucLOP0(d%2WWo4EC^e}36^nb)Z3 z-Iz&x>I-^pmagvb>XS9WqSnv2t z2)b{wt$1YIV6Tc}Z(G07s$uZFs*V6UT*P~bRbn}|rq8_tZVQN=DGt}x0_C`;%At+&NO-FTWW4FL(2?5R$kK^#p zF)3bm?bS{!fP_LM?tZ=Lr~+@?&UX_j1hbaf^{>D2%lhyA_ z(-L%+ zt>YhLu`B){Ad;_1Fd+Jx_3O9N#2z=@8VNRDm7%8P_grt+e*MN$tJ4Hh8kq>B-j`Yy zFWXT7njlPT831ueOrJC-x!RNH!;K7LrcaHaW9VOB!d=C(H`}@byBlB@P;~;0t54wW zyXmlbfg8{EGL|oj8HssMBk0soW8;&FLEfJShvaCgIDQ0t9DmY=uA%#%K*uGT6eypd zQ)(yT?pIcyzmQ1IUht_?hWI)}T$}-G;5FKkZPczULtJNt${e3Co8S$VL9cx-76*$c zwLBt`Bfh>(BNjY*C{;>=&hF(*MY6lFEDC=sC_Wtwj{g_)mpcO&B-J{BeluLJw_BOb z@_AN2l&xmq035NHZ8JpcAun+pG{j7%c( zkAQwm_QTJNnKQQ;y`mDnc=tc);9A>PtUTk9Kb9MCAiNP^R)XlrxeIU*zkG2JmQjSM zU4aEF4ha0q6b_-;{}a=>@i)BQR5}&a*3^O{z!8YlT4}Q)U~Sfq)C%y4!_xI^hQpIc z8d#)Z78?z#y=Tdzw@kCzzumzruCx$hDH(u1nE1B*3)t;d)rTi9?$#~$Z6@A>j~n#t zc{IhCI)8%oEaD^%M;AH^TIgt_I84n)~VEUbfHo=Z;FE~B~BOO9z zm;`!5hJ5zx&E^xYhUMxDdPI)1&2s~(6-ilPm@|z<|4^0e8A0#Pt&%#5?ZoOQH^z~Y z7YJ6izB}<7S$S}ZG1E=fY}hcC>pJB~=v;V4@HxbJB_Mv(+(Ya0&alXhcX%>En*SVFwm9@ 
z`)vH`^!@qCpnQjrvu%^mSc&;uZ3|+KiXZ`EaABmLguj@q=Mawxz8f~Q1%Km;34IAR z&sMks+j&r#E=Vm|ofNS6VJH;bnh2>FDU_VULL3 zy1#uKj{6@6907UQ6xetHI~f=e^I&XT_PY3VDLZ% zp2NOMqm|RFZCaqZ2?ZK6wwXk~XU?2a&7rfq^}r=>zI%Ni55AnZGi~`jvc(!3OgZy} zcIJCg3xiLlNC#L>i#6V(XsVIAHS2|?3#CFvaPC?Wxrg`*-U*hb*1Wp-c?E|lHuG1x zJD)Wj|J!_b&Z;)wG$=_}yTIuvn<4LW`Nhv&PNpvlKVC~ot@kCI0F1$IlDXlvfn)Ni zTJ!W8G+4OJWK`!ZuP&6$PYtoUJbd+nNnkdDEDkD&{V5^1r3AJ^Cmbh|O$ptCacatN z8glurpsy8%-KYLD%zkAyaRDmz=h9Xg@nCSt^r{&$EPHT^I*NT`Q!N`v?#1^EC3|-N z%?Wwt78Z99Ndm%wyC8HMurq$hDy@OZCN+8A8HKTMtW$Jh!hqsuyB35Iw~Y8r3t|j^ znv)aGPBBGFLM3(`8l`_l#HBRL0GSr528LbSJssn^$;E&Q;c#vAI(6D_o>o_}HLf;I zy?$YFUMxDx4@<;P4>IFCf4XsrDFJ+_`k=DpYV{;39+6H2QC&U_oF4lxDgUu640?%^ z_X*1k+U}4<4Nj%?z2IHd`Nz;1))jEuAc9d)4!x5e3|H)nNI%Sq=v#Nb`mf6g`38TC z>ls`DH&rAaSyhQg9*Kt_D-CQMdwUhlMwO@}%o#M(PvlS6wUTW<%uNe4Jdm=HKw~|7 zjs|3v;t_Ui-3G074+&&+Km>KyK92oKI%#RNvb40^dGtFar07e|t5{mW{6g-3UV?RS z{LEx5DNhSB=~dvK>3v98govcmp*h4&X8;+U3wMZ>&-1vPc!E(m^1v-U$o|8DS)_qm zOL-8>KEEO{8#-g_=FR(9tL^tc01%|2)R@ASaJ$d()~i0=dr!C+wY?D=yD4M-;%z2K z?ToRtEec=wza`8WUVWa?)YTMaz*U#uhEKy6^re_7l@h%8jMM$K&q*jqttl6)Lh%sn zpK;VOD-VCjAt}65Pv4f`o)y!U80og_Q$CQ2Z)lC{=|eE>PeweQR2v2nqSw#C-#Aj= zd}B#xAqJ5oCkz<8f4~$rd)~wIWghFlgLp-0DU2I@ShPacCQOEdhH~N)9xcF$(C#1% zJzhg++yTy`o*$>PRNOzzVoSn>Nhdledw;29y2;zdEJWNEYc+r@<@CH=plxOWP1?6V zLi0`qEF}>Pue69qLgpK_dtm9xJ~-hq@UynD0T#?|&&T-WJ#Os%{?EaTF;*v`*oZOf z&gLEu4QFu>^q#JKRdRK7OsAdK%Y+G{j{ z63zp%U;I#}y&xrR8eE6gq#8F%Jiv7*H5Q?FA>66#vN}rFuWDS-Z(x=Q);Wp6Uus6? zXykz%GU!aAAgHmUp&#`R$d+RDt_2rDAp<`sk`lBMwvucWD)U$q$PTJG>=_}WRPB=& z!Z0I(Y;)QhtT+PxXx}+WSB`loV?RhuxHs_kI4DGep9F>DQx)1@r{yL1HBk zpPQ;-6vm(~OqyeHmqPSy8gzXF0uGaJw4_AUwHbA5B1E#mk@P3k|K^)sCO<%(TY??| zXs@AKvYv<)sJc;)U_S~F9C?=n1gz+7{crA9 zaRI_{4xY?BBBrJfv42JOx>qxi5&@}F@sR5*Mcn=S69xIklT!FQ?BgTc1NlQrADHRQ zM~@z*B9i^`Wp{6HZ^&Uc`78MHv@vhpxG`~zGugIOP@o?Pn#o}Z0x68n9NkDs9fsAC zDyldm9DNEA)lZ0R9GF&KT?kNImV;vf+gHYE1hV$;*xZsNa5ocs7(IgVKi))os53fIlh@;!RdM{!7f$gS%oX*RL9;49`h2VEoCXRc%-bEltXq`~Z zptd-6ci0y6@#C`Y=tvT|r{M8#l>f|dvTg!j(nsfv5Cg_$>F^~!hSkpBr4YM+imPy& zAc2&ypNe6rW2o%1PyD7P3Myip^&ANLkSE8lo`$nO02ZvPuWv33S5mTg!eBV9XsjLu z1&6e1PLeCNr`1`qwFr@%iLbS}Aa#5D84x+VAYbbUw~l)A7UHE!*Bsef%Dr%j`qC)` ziFd0}XErL2dK`v@I27LBkQTvT5urgHp>@$8K0*Zh*m1dTp+u6j)y;;y%Q0|qHSJP; z)H9z6zJ(XOk(k&sZ8_a=d&fC=4riger!A*CFRm6dZ$?mgF)xCx6iPQq9u)#^ID{gc zuXA%HWCJtkx{~S>am<0#NxGXd=4idIzs`ecoHlLapb?(!si6{BcQrAwgdM>SJRkD8bfX*@!MD0} z$q7qiPlm%1vi2^$>G%|EGjB>hHOVaz-+(WWrM&HI2M?kSRVut+CQd`h1>8z#?$~5YMx7k|)9}bf#1&XnC)8 z%1{mXVxW+X;iN?7?b}C9GDUm@+aFT^=-D3V#l| zwkD8~X=*OLO9Mz)vm zu&#Ywv5UNAFxiL=Z9_%8)w4S#8lfQ4AnL3o7mkPLxZ5jFFTlw4m!$01?U){H0ck68 z-|Kb{I@UPx*TfFtT)L(WqR|3Q!?3>dAB-o6978eh$OrLGc!Q{YeMQ5NLpX=SmEbHV zOVSct#@6BvwG>MWv1-4y_xu$^YO#aa$dUgDqqk|(>Dr6=KtqbJudhl=uSZ$r0FnKy{nFq*2=m@I)ycylA4ovZKggilu5YVx zs@G3s(ga(|-AvT3Lb&QQUjxw0FrG^|Ir@wbfj6P6P^WVcW`!z_&;fOrgJjp2`Vb~( z+93qA?^99m5E(6?354Dv=iKS;v3YOtW-7I4PG{0g9_1443@MfPIjO0HC@Ta?!TnyAzV0> z9`KFum|#Q+9r%4;8cOy)^!~hDtx2D|?dsIt{j>dRXe4h2ol5k&p-d1c(C#dET%-d5&}uc4}>i~0{yk{Pi3WE z!)ZDSqV-rfLP^E(fJTYrAlqh&<&BhCb@5j8Hg8gifGuvvumY4;bOx~elu`mNUWdXUi+L{R8)eKtm_ez@z>qW5 zh=v+=2lOUbf1CH!^ZH{Yv(&y)&1UGjn6zyKY9qX7*ow@;`5BITSO|$T!I|lqj78A6 zxly5VEjiL#v2BYu)~Uj^N6~M&&5(|UH2Clu8pmvtKw2q`yf@B+=0*)+sP=O>0?zOj zB(PBB0%P3)@w_>Ad9@MPYbuo62vYqDG?gX+kan@~mb>Slec4a0LgYeKV1=UsOi~4b zQVU`yRADvQk}%J}{%2NXU5e>DNUlyiQq7U3O9>(++}9l~_a=1>!bx=!3SmWO8O`4ka3epx!CGCP6LSdqbGyBIN|VxK7AkkS(Y`#1>> z`*Rs{qFdRB+ z{N>XIPZY~hLekD7Ob)#1c+QVzRobTZdPzRy;9c&a)({;60CXYFuC%O{Cx(>gj)vB~ zO;!GC@x^3Vfzk@r>u+$w!(h$oBszZ#iOwe4B_4jCbKO!~ew8b>b(6J*IIU8X3{)O; z2|Y)YGLuCj`v3&=s917@?-1Mz9U!Ei>^_13lhKd;8AiE#$&+utTnJ?O-$>3$$5 
z2FND)-j*?c9KNCwAsV{+;qsD6)bGb?QgT0^T%7Z^@9xsi*B^CR`|IX^-@E!l_XDOQ zttPfu>g+ht)G0jZ)bRM0GY+Q4|Fg(s&4!?mLAJx3ABEaY?cf{|pmO^5MC=9sM;%r~ zT~w>wetD9xI<4?+Y?iBIj(ytliwEu*Z0WWqzwkykJA7f>YDMwg~7GYgjl+nUnwIsZQHezKB_iZLIvo2u$E7kVSJ9zI5ARc zjbQZkD;|&MSfr!mzttL80hpChY_M3F-7GxR*0fxFdcW`0uET{Xzb3O`3Tt;_9rbOHr1 z#>wcYF-+FrLfUUNd$=U)yFrDw2=M?kg+iHrq2BN>fN9Jt@T-i-XDzPOQgX3l>s$8z zyJe!kaq<;IWz$6|6G&54;L_j}ZG}a~6<+AxG&myn)Opr;3>n zk^EIh$JLCcqj7Aq`{R!gz$I4Sq9r&fo1*Z9gHU$Y1mZ}YZ~&dV)K;DjCmR$~zJLFI z=?){Jq2pus36IjROsXi3igNa}G>{o&4j4U0x@D z5D32=Un!f?;O(SG9{f)e#qACuT`_GG>gaJsLl|qzU=qKI@|;gSv+{+1L#fNSqEdX$ z9l^6)^H@}}Q-5rDs$cT&RUfRhUxoa8vz`w2P&*2Wic)~GrXL@F3*0sK0Qy$8bPxZv za^(#e?_e%&NP`S$uG~G(@0@@0cY_b?J(XIBd9vl*v}mBxULP5FT)GcU^gj>Q#+|Vu`!GEhIDXWxRA5A zC2kJ4xEK2WH4kFd_G~V4*k7$*YU^p2ELegq>l0_5h$fE zOuuR3u*Za0Ub>nQIIal!2QZNBH1cV6?wSNdeg(^J)}cbZ<&(`Ka) zMwYf^b?3i&5GAAmRp_s)fABM2cu<@JuO&t=3~DLuS6-dESw3eli&gd7B3&0S9r)q+ z&Ad*$8f)p|a!eBO)>#wG8Qe%%gAr zHf1OGoP>5;b*+A3NT)Ro77`MZ%zoIsXCZy-QlgM}5;hKsRo&sk$U}CbT}bHX#6j%m zr!k9pCIv`tyFi#gy+^H)c2j(JV%0@>UYFAYFe%(lHnHi*k(O9h;bMN*ve!`R&*pxm zATX&^bt7OC1q@m`mV8niA_!l&w4;MFZ0ue}ws?wqFQZkTQ_`v5xbb%AV92rsVX8wx zD7@ZYZPB6BSP*wo5P=YX_+dY-C~|{BaihsopxssXPJ)W;lHOVNRX*>d0&5LPV+=~$SDhm4;v0QQ_TKc8&!f*o3?J0*{pqeg<`NLU zK3BNmh{JimTz~!|E-`U5h>U6SV<$peX&5}Q>>9pG`X&Kf^uoKx;2UNG4-*Gi_@%LF z$6Q!es=vpBPUTb-HshT!8OfYU1OLxF({6@_hBwPS^yI!Sa*2;0+1fh9^aDQG67&p;+N9;=}rNt;cJ>_rQsRkgaC$y1C%ymhgOG2NF8_RYUip&P|s zzux7t2gSH#!~mp+I~0YzXht5cEUe#DS5&@4EMmgUbVNYJ!a(R5baVMY{H{z~Kv-_` z7MpA3ZRW*oe2|r$9Rw)>KHuG<#a2mQt$NU?(uQ#&2gZpQBwe(ZbTjqQpOU>sZ?P_& zDnx`Iz{kfhNu4uc2haA_I|J(HA?$VGNMP5yTJ8V$L5qMeI>n>|nec2Y{c|FKrWW22 zjobSO`r^poK*Y-pcIZOcuO(un5~C6 zo9cxv|L5lrJ*oLo`)xIRA?78GX>snzdrzW3fqdW{coDz=KdL2`mFfeMxpQ+G(~1NY zM;aJJlR!R}ny8TA4rChA*+Qxvk$Wr_6Y7NHWlh6S@)H>rna3Wztn`H3CvlJ9@o>8F z85(&;pikY1R_>>7?ti;FN59+MB=pCl13+Q9lVN)ON-O9pR7~`c$JE%OSjkSkd3W;Y?sWU`ugUC4ZS%7U=U`<<|9a4A>A7i{ zB{=|uqzNk$am8F3jlelYcsA+Nop7X9`~HK6C><9{&FWl{r2;SL&89kt;VmLK{)Sv_?ZhWh-m+&8X zwr!SeEX?+D$ft?SB6c1TNtvVlzFVrG{b2zVV zk3N#qNwJBxB8~g&MA?X&{~UI$ygn(Mw#ddZCY|rGk2urj_u6$|?OKYBkbXr^z1{)ij1Ss-)*V4y-*A+|vFLbSBuxG(j^Vi+ubYIaElfq?; zLEE=s4qhmPBS&3li)#6&(j2J(wO{gjT|x3{gT*y=g%UdqIbx@|E6oJs>Z70ODiLR0_0Egd6K; z8k$k^LTn)5@lqn^JGA@FT!)}r2eQcu7EKY_{Ic*tItZj!Z`^PCl^b)S^ z9<%S4zYH;Q4I3Eggt|QbTN-ZH)s6$56itwRLtaz%Ai2CviQ9nN6+(I_dBnB%tSyfD zrr~FX2Cc7c!7_~6*+=JcZ3}hS_~ZL|vO78_QlFfc;7ywssoc!*F?tzgvm^~}lZ6j% zIsEK|oxP^~aisC($>jdGpl zki<7PY|zK%9NqVGu7R^zaZv^Vd%T8>(4cGdX?CsTbjmG$b$-UHhBE$Tpvx%Yq$GF! 
z3`UB8*32t3s7Fn8Mp`Pjeo(K#{Cv*Vdz@92<@Ba~q|_XW`f1~HQZ38Q&YrO@=hn^x z?2L)~yIou3V^+ll(AU?$`KPJP>(Y$#OZ#6wf4XQ*Q@#CEb#-ofva$LdS@0lobC!39 z$&ISh2*t;q2`og(jL_N6%&I;Fm1;5q$US0n6SW8(dywqYbK|v3KM7FC1183Y~oha)Enkodps1u zOm1NVh_~tY`tZQ{)EQS~mSGR(T$|9YV1jJGEW-XF_Q%WvU%R9q9MxvmouG%KHLpBI zMqm>UpYXRiFI4rQF&M$EIFz5_M^`7rOijUVkL~G#X{aMK-YA;GK9p10V zt8JwQmrtsbgIcQaFefxWZCYZo`rwG~XMGx44OuCh8YJx*A-iYS9dP3NAJ>VVS-erW zW9yMte79lNEenfRpdEf&)&UTIiYeo8+kkdT2O0sbvkfgnlP`cv2=x(=P%8*AvvjRf zMT~xW3^vCE}Cn{d+y zm)xFu{O-9HTccyFH89EN+T{R=BcyuQ@C?Uq&Nbo(a{~)=^Ay0U^~VuKUS4LuGnCNJ z?=1x-?b|oc2u><`{o9BU_uLmuG%_}hqYtU&BW8(QfAw8q&BW^IA4S1W1_P}1JKSyJ z=|k~}M#Q0huA~ri?QjvHhFL{zp6G%3N!4rGGI#~%4F)5TX^5(X+WqlYO3XtMwq|X% zxxda_E#=Cm73p-xi*Me&v#@Cxi4P%dnr>7A`%8#I^q4$cQTFnBv)=b1pJ^Y~z+4Bb z%s-9rG_AVm=nS$y))0-pA4gfuMwjtvUY=9(Jrsnc>#F6dl`He8nBH=mwCG)Q(&AH+ z!~4A@9!h?SPVc!<8TvtG4Z3e^H(+LRG|p<4aq#3zf6TcCJF#EJQd)MU%}=^OrI8m~ zIhHf|s?4-}amSP1j*}hdmQ8F>@!U`#-$_eP@s zq4;F!QX=GM&;_hm{Pm7s?Ls{)KHz+XrI?!v1H&*w7sKrG$e$MJa?%#ft7gkP%?f-q zRWJR5Q0&x{eXKPaP%>BKMJakl5#~=nBOL3_LlF zVAk&9it{QFoS^mg4%5lkl`uLp6thaE{2cHz{%uYj#m9d;+(ryUn~a812;=}(;Q~*8 zQ@JhPR2;DOW4WGUSVyh1$T#xw7Mehk*mBY`kT%@y`59xc zEQ_OK6F|K8H3G|%QLzPtwsY1PnK8Zs^E*B272$5b9K=`tLX02LNsLO)f`4c!3JF&wNHC{F-`ViKdyOD zuwigor`*rcZotx?E+nj7Vo0?^u$1-3Q=R0v&9#F9blWhCP~+aI9&}?r`{g#}$lz9t z9q%7+wEPndYk)R)4lXrIqVS8OGR96Oyf1raMA2V!hWP&^v2a8VT>+NMa1ch#MJ$j9QpC{GXw@8g zs=+HxmBMa~Y0Hp=-qDLM7ToC8dcdMXQNLH-=K+E?>IH_^gxW_}htN9JbJh;ryfEqY zgrJX}vvkS_PHa?nYap>wIY9b^1X5Ewzh7=6E;kn3D4NjWBL&dodaG1214tu9A2@#L z=XwN3o;@4f3I{VY>KRu_?v!wNWUp^)V3|XEyua!AX+P(vWfq%<%r5_TXFz^nm?7yO z*CC9d4XS0hr)0b*?OT9ZF(v4tCJ+cbgni#{&;oYJwTM)wG1x;7Qr>2|mP3I`ju}I| z?j?g>B1BeH5G=_Z8cnI#z8lwr1Ce0PSoh~2e~411G=THpB_30LO<)Le5Yjhu*3l&I zPL5RX2)WK?TCRKJfi4g-PcJl>74Nq-+aJ3TlxqZ`DyCshP~s3;U%`^P-7LdqPavSy zs07sU?Qt}Bt#$wT4|Zhxy=WTx_lp0D7c>Ov4uEBSt_h&a6EN^^)22-~KppBVGxJ+X zr7>x)Qug6P9%0wc1jgQ-#^wd$l!Rcgv(MFMPu2^@2dVU$QFha8lD4qvahmiSC}Olf zN=D3>W@`<_AW?%7c)k*P%v2|XZIF$&$>N($_cxd|pRxc&7r*)~dg!aAs}ALn&#cZ_`Sg{@x+ZHi#E+(1SMXiCm;NXlNaQdIB!>OIeNY# z&-hPlVadhM8P7f%hticUIZf|}XBO-MK;D%5k*i!@_VUcepIWy!_^%W2Bs*K^nOWPv zXSeGjWEW4kNd7LmabyfMWOU&VWhBxEm^vql8!~oBXoOS-H>t>8N0CX$V7_9hlw!8! z_fU`S%bXpWPlic{w);^ zGHW}Sc*cpE1>RCaSE)4m0Llr0(2|(N0}LC`+Y)Lw@N`i7cN<@ACVtH&jYUvl%iDW6 zVncMug)$|*^{fw=i6Mh+xnnD;83(5ie|2KKO-aBPWbW8cv?ttV_4z;@5T@HNjE0IY zlv*!!4OOx+xqQB@D8O*d|L7V0|L!7gj@{<-G2Ww z_=s3XDr;sMj$bHuF{5m-&(8NBl{bN{Am7s`(L*h8EAaoG+|LiSM>$jdnDQXK2l70D zCk+QAG?{@d$a@fVviB=yEXu&4U3VC87fJ_OzxY8OdA=h1$UV!<$_WV_dQJ8jT=v31 zUD7DR9?o}s;$2|MV*zO^r!}$34=;vI(sB*tk z{VX&)bD`b3CgKdBgUygVS%m0};9Irmh!SW z4D zA=fWm`WYx`ERIX?H@u7)GbzSl)ntwqPc{^=)+r-8g2*AhLvsn%fyI&zTiPWvXR7N7e1EjfP&>g8m3dl^N9)953pdAcMQBK6#h*ioq?v z|9*=0*O*n`|H#RciBEmoRK^3`WBYm($JL+5ZQ9~CwU46gR8M-pN2=$gGE66eR7ss= zkT~!Fn;>@T@g0@yjB|R~Ie)V8K*v9N&+gcUYG4YJ(i=W%KvNdE@Vd8@y&(I&i`-u; z3T2q2$b^K!O{xbPLVXCr|cMbzD8w zL;IZDcjOy6zv#mDGiKaId&aXq{69Wb@a-dH?Va(CO_KTGOEgTZ&)0r*;t~6({S*#w zI7OIhq*P2e@#TKdm$TzYYzB|^E^3&0yg|c;hO%tGR;Njm&sZ;5 z6WHTMEv2IYn=Ez#qdZcWhe{Y^rYXYwNdQ+x4tgEVbadB%Xx(~>&#f9k&~78h{eM8D z(osYCskkeuP{TF0dxzE{9C+HmyX^iWZN*Hhb_zhbrAaMWjmaJ0S1yD;`kF? 
z15x|bN}hycenfQY0_CoM>wsW zgN0!vYjot~*~FSB)fs5o@CZ#S^R&#XnGtry6tYbv3i`gy0%k+|NRxwm;q)0<*StZ(d#i!h~yzpm#nN|gBMOc3Cqx2al%)MNrH41Go9h8Cz*WuqgEu%og{UY-jl#; z_E>BO?!1_OG*@U4uXMv<>K5GOa7M)B)4Lmr@;CmA$$({hfWjDOWp0|rRNS3tnZ=U%Xfs4AS0M-xa^Lk%!q@LL0+ zx<@a}*`NxgE=FvaJ?zoVsS$REjhj{ICbDQTb2-0bpi77vWW9RzMCg-XLKZEv_e6j= zE=+W+n5mM>my5nUe>MFk*v%MHFG>QY8hU@61#+x39#0)*hf;$^o2w(EyTp+3$ogwP zvaqnwU`6wZit?F6Va{{nnARC+yhHaOCEW39-@KP35K7@ zz4S7_n!H#tCVDRp_?)D0{DHv=GpknQ3=U-uAc;mamxNsorYS6nOjij@KT-8=okb=+ z$h20lYIXIA-}HJJB4!fDt8&4Y@~EZ|M+nVnNn7%r15l`SNhh>ab&AfraLFfL6K$ta z08(WP2$@cPRZVp3__+*NLBxvGQfn%rdXDI20t@AH{V|VQ64tkm%4q}{ij4KOpB|-6 z9cAqGg=^oj1A{hg*&`&BdTP-M5aSpX9qmWI#-@n`Ek*a7kIvzgYXU(CW)nSXscCUa z$E6sOAaW12M&e#VSYc0A`jX5zSI9%qP{xr(YE+cmtVNiR%q;4XqC|D3QAyY-Y*>|A zD#Db3aEibGcD}d6uBTUHKEtikn2`W=mGIOK z3Pqq#6Q2xYWHm`sX0G!_ue9>$EV;FN#fLexuZHt^nm`B=`LDe{pCwToMVCzMcO;0? zzi1Vc3vmN$`SjQSd14EQjYm<+Ht2VkVeCgRPTgRzJ*BP8AHDG)uHK38Tich$&b9yu z>u20bnV!g=6iq4+4^z$H_nCDBMyTAv19#$u5AeBh&}}q=4CT^OmKkHI#LoKr@?}ZV zg^#SUWe)9O$BPc+rg6t-)rc+%r1esi^o6phnu8_sdKr->+%B2U=Jq#tDZ4_5MH;5{ z{V2zdGBB3R=Y^CgWxNCttJ)eUvY6$#Uv**WqW%v!fps_22v-G->Fsrf)#|uMrk{%j zc{?MQp&Hv$v#o^OC0$@{MaI>kxX-eqfNwIxfwGN`n)F0NW%c;?b(CYHS2gZ1md%D% z!U}aszdQZOt|{WDvyz*Z(}E}(Wjbm}!;uh|D5=tw`|#(^{9af4$Ya_r7)fiWuuWtB zUd(~JFml9z(R4?WGKOatiA-s#^+ znY@;;E|>34gEWc;L>IMJZqW8qP>RPKb;x+?&hpVtfriSl-k*4u;Vm4O-L9mocKiw7 zkCa3i4c^_=$jzlda}i!EhSg8|(GsM=!uUW@g~O$*mOd~pM+dbd`=vdqam~!tPlyvJ ze2x;()nOu}8^JTk1eiVY@QORpFS^ItX$11;d~NHaoWfPq3fi>GSMCPE70O-T|6#8+ zNx!XYxer)HY@58McMh`IMX&kWdt}_wjz-wls_xGa!*yu5!qaOFljJHw&cvz(izf#S zjQX%l8}j6RAq*=8AxOavmmKNS$HLKBeJMIE|BxFjE$e^Q$(d5yCtdWGU?xgNJSV#I zxDd;qQTiH3PlCC5J*9C^XjRgdfNAcM>oD#yv1}Z15zkXzG0kb}NMd@?j)M(yaMR&{ zexbZY4D!J`%19A16IE=2-F!N6uH`fH^){>}X=EO}p=FaA+~d)wU{46ePxM0a<~89$ zMKG3aZUdraZvKrj`XI!9C-%7?nnz?eQ_sC3hQkfyAy>bh68_YwzbH!#rI{8ktd3d& zJqk-=GOG6Hi%8GHgDV@G_}cd7dyy$T|`}0i8-Iva`-t zJ|ACGqY!?UB!1A25H0vIzKnj!e>nJ|JZ?YVe<^O)0FY=#?xog}VHW0Ci1ulvLCTyW zveN=85qa-^=Fy@>3n^%F@T9@U$f&8}b{h(th^R^j4m(*2tZ#SIj3O<@(l?8lUH;&9 zjkO;+59$|_evY1^Nz`zci5_QO26s{0NBzQvNDCZbiGQ$rZJ&&_Gz1XyBF{?{c}Orw zP=;uX@A8H1+OAQ?VMU2XGxxhF}9(`LYBf9EXf;BRtyC5X^UL=*V5Q1 z&a6+@w70N}q*Ysn7ciks6fgntNR6ldVu!f|r(E-&|3Ht7lokOzbltDN?rLaQbRmlA zrudI7B2pu%aFS__n6KkBx=vEqbfWA9h4ShPBjwGNP}@+AsDnbk4p~-O0Gr#Vtt+&A z61LHR_?uMHc1(+H6%RCa4nyv5OQC_`Gv(w*@*7<&h?-L&ILlleP_cdk8dci|Wr8tt z2BlY&)A<~9fePN~^vz1gM|#kDMOA^55dwPc1$8PO6?J22{wEMSp-rd6eTmYLwn?NS zdVJcPxKLWtSx}MOkx5`28dh`Y1skUQ!mt#@H9k53X1UX%z?2%#IQ9tDT?qjJPDE}} z9w11`B7Nmp8?s&=u%+;Si4!Gf>k8B;tvG2k^aw|T;Cr)*uKZlAr5E-?A_aN1KO@vj z_~pFg|;L>ltw^JpfI6nBCkTp zxcr9I&R=`|##fvo!eD=C34SYEVQ6*u+x zY!G6x=aon!pXd4ge=m~HdYLu@;4N(%8rj*RsZe#(@4a7Sct&nt`1g=Ei^F~Q*G*m5 z*ZEq(H%+F6UT?4VGtgVp@^+IM&QE@v`0Kl%(&5|Itu*!h=G$p{*T#Rnadq!4Bc@%u z*GFfLaX?|&ft%Y;%`TqjdH%>vkBmoUpOy_SDO)<<^~gdm?;*;e(CYsG$*s)&je65; z?@ng2RVCbk9w6nNDth=Pk2+YrtA|VmqZf@vMo$&0)Q}k5aD^=@3A4^7)ru73*HBjr z#({W#E-%@l!qp{Ep0H*iJ5^u;lSj#v#>$tOB{D@g=QK~kGmQ#=9?LYLoum4t2gb%} z-!r~Vbz6GO5B4sX>P7+cZBezEcgm{0_s6$$qq4URuh^F{_iCsIHP1p<=?3xuyHr?m zYeelneN6K%AGg^?yI+p=t5($VdF@G7o{V|^JD(>blK=uNHYLc24fy5ajxJHi|% zA#xzTrBVZ)ExguhR@16AIB?@?qnPOIMDI@v+vDJ-UYE|2)(Ma_FY2&~?HV4gV zA)(kxC}U1av$kS1olnHpJ-sSzq;l-xuEEQ_1eFjmN;Ly7&a7+MyHlIF$9~>iab}M7 zJK3a5csvkG0R0H(?uwMIk4FrBGXBB4y}~O0JB8X;0Knce_|p~|#+m98TEtyzv%tPG z#5?rT*J#4LMd>rPw{^Yp{nZV!zK7}%pXISGIiGxb?D*=Z)I@J&F&|Zj4vf)fSHC)B;7xNfnj(M*}wkXVOM^UfFnr(sA?oXC&L?F3BGaXfmTQ;KyZLN7a9jj00v4owy zr;XS!upo9_harR2N%a4JMB6QP=B{~Tt1fsEDx-HIwn&pH=WoBCYsts^r7yE z5N~9aU2)X=($IG7j>r^(y!4^jLKU~N&HNqo!s}Ct)vH>TSv8iWctzo^M^6(5yf)X^ 
zQn6)#k7c8QZ(^$K=JIO$7Gw8v|Joim$V2yNtx)?VnZKWcuUom7tM&DLBJQH#NbdVV zr53yhhdYR7486*oJig{TleUg?*eN?X?vL6z3n+QXd(ae}1VUALFnn}sewqs`|7ZRS6ZFnWMSLMdv*uNw7Et5U&Os{~0Flm%$v<0E zO|ls9@{})w z>D4@-bM{MLMpWJL&1!XhpT0?K>m6+72+PcYIsOaX*osw0SARf_qiY1kFM7guW@`SiUePt2WZKq!!@ zWw>MZh5@feQYKwfVZ)VedupuFzvT(e3h)@KT_Ylr74xIAwZm;OhIj0$H)YdF03Zv= z6I#^w2Qj(jkv3^uWi)uqt@_aEh5>9m`=db*-E$XS{C~uqiC>QC+y9%@3^ROZ##po8 z*2QuAtYogC1p*rv^?+QPR#uN zf#3alUY_r8FV}Tm=Xorj<8yqD!&z?GdigEiKbxp_TRs|LpDN&KGTtqdd#Lv#0w}}f z!8hLefqx0~Zj3>-R{n9`$u}kHH7Z{?H$M^wCB<+tY`WMuKr!j5h0uPRkTX zF*c3wo%8WjKHC#GVx_k{&Tlk_km(W_-{Q6mh25+>`6{(y(F+>pBEaoFe0jjf#}=7QD3ynvXieFKmBuR9BbP?E9qzTHIg`HL+bW-{ zvL`Gz6G@;kH7_t7jJ^UxRe>{ZCdYHzHGnc!hS;LFRfWOY8cSdTiqZEJ&Z^+r05IiPO0awG93o=w0*rbUE&`3#}oxPrNiz z-`?A}vc{3@Z#yO5mWFCPX~bbdga_PEF2yt~6`T>2NY>O)m2Y&>uNSf!seJ~5y%2YJ z1*X%_*bOp~b~5b>d6k3%+6%ozJ(upU`r147*;}y{SK=8pocFS#Ko{cI0x>P8-_YB& zm%>28{k2*$sdZI}38rLM&6K(E7IJ6jV(!3szvWe#y(*u^)7-W*gAZLX;j+tdUE|r4 zhpVnliQu71wCZ_lVfu7xQxt&p@SD|cfM=z9HV$XWi3)KK>q5V6i1zF`sy^W=EZhHp9=4cR#Cm6h1^b2+tCFt$=n z7(s41N>v;&qIS0T+q}>c>_-Nu;byW&1K9aa_5h4mqNglg(^dr5q*a8PkKIErG(JTx zEBxeO4WDywSYz6S?!0__b1;@^vc!L2KP5d!yip$7BbRz`?Fod84NySVrKL)d$@cfB z*5~A_bF|S#zh1~0Nu6^Lg0U0T6shCf#P!)#KJ|!)89OP!{rJ{y+fy&~^>)+g*mo!&ejv(S| zo89+%M<=NpW2YbDy(3(C9v8X?*s3teddh+1&bJV|@?H0i`GcY8>J3)9 zi5qDetTcm75e)Iw;ND+9s1(e~{Z&@}-0i14xp4%6481vs`PW%Amx`iN#p=e?wEs-w z+H&lzEK;7(+9-OdV#aQ|rBDWWj4z+zK$4mhC zt7P)(48o~%8wAoxoi4kVqi(F8HUYl36zMZFBCJN+cHfM{7N0RV)FxN~oNN8?25rYp zlO{b*uFM%{yNBIcFl{xYSim#eS=mpAzPj#c&c}^7{;Lk39c;Lmhvs?i4m~h#&`@n) zYWO>DFz6vBLi)x)Aj6ZBlN%nI`nej22S4>i>z3a3L}oQTILz}!1p=I3UcY&H+cejR zJcY$*mNdO@CQUK{D4F!lPelCB=H3znEFB)+fJ5FaU99Azp^}EeT;GB?sR=P4Qy`1F zI?9|W$-R)^&H%^`l07dQhSklZ`r-wqnYE>Z##D>f@LPBAaXm;#E=O-KD`yJ2KI>&u zQLQVN6J?LV$;|8-8qBe32=`!N4#jJ0zkje+?qY1`Jfo5*lI-I;RPydHQq!ztP!e6x z_fSPM?@zpM%#P4mv;s_XRT02&UytbZ`Zs1Bsyz7=X^&iyz95`MRI(cY{|p%|$Jomm zIp1m=Oq7CZ8In&xSwT`AAgb=UW09Ez(+y|Ws#LlTc_`<{^*iCO5KKx;BarI;_wn=8 zY?;j$`4e@#ILw5O#|4$=J?(z|9A>r7Wy8*-=OKamk?yq9f1U2xMhIRGvu=a2dM|i9 ze-hNf1vO;AItyD4?j_4gd=sLWee>vJv;30u=NEKyTN-|!GyYC{7Ix~2N_<;4(XJOrLD6jZNNJuH*rL(iY#$?T_fJY0 zJ)r{H?@GWb_!gdX$NZQ8aJ?8&Upb;+Wqoby1p9;+$Lgo_)6$yqXc#)O&C+Ej-#-q| zC_>Jv(UJvJZ?~}QCHwYlu-SU-)ErNaK`SmeYV-N?=U)&_3*A++3gP>6AzjFyZvC9J zS#0CM>Gp^kLPI?K?310c-8gcGiJ@9i29;M-V4qfoylo#Is#lW^i+*c93v)Nvg4nJCCX$Q(yLduUZ!1sio$5fP1!#|b*~dnwJ> zZkYV3`yKfoDBYz+O?9&jaEVFSd;L3&seyJ&A8d&{an>4LgbGp;*dm!<_YB@d!6N$F zJ2jZmkLcU|&4YkauDo9`bJ8+_4@%F($x>-dZ97TZlmAyxfr5(x9e;ib&e?g%EiP7x zsyeQFtV$NFH>aawD9!$T{f)Tlr&qVN(U@B0Ld!`^Ldo_TSd}>hZkLgn{6ffTJ{C?q zQSo!F`YiB-9zYdwVuM5Snc)OQ$jaAt(WOxdDBkO0Q6$V--NDmJu!7^au}C#3_0{#_ zN>R*hHv3vsL01<5pxp~@vFe?=y)q6mTIyE!A|R!uJ!R!Z3H8%cG1D(%SJrAN#hRm3 zx+tLp@8!$|uF5A2H>cY-#nwebBXTq~aC0KPA0`+L09Ud*R3s}A<;QW{>bP;3K4`>r zRTxI242EsakfL>st|8tR&f`=aDKRX1dV3hE>o@0@!rF6q_||&njx8{Yx-0u0zxxM3 z$?{$00&_v2@KXqujw4$6*A*=~Zx@PM^u163;6(}f5BY^@bwnj8w<;;XB>eU_499-e z?KD!?-kk2Ma~aro9Ykr6?_BoyUR098v`;nL{#4@A56{mT9*qvu=# zS=4?+;tN$_Btn6Pi|5_DA)y!=?Fyv`p3Xq{ESvbo52UP*Br&gepyC6RDn54TU~`Ss z?fdAME2CFia4!k435hf1UZITfDwk5)jFj3}S!!b?U44~eG|)i&;0FvX`pVXNBO<8y zIMcpK6^GTf`%cQrX^V3dbO21i*tlW7ikb3$9!;~vi#6D&QXrLFWzQWi9mgQY`;*)W z{l`0qe;=SO)wg0`bFLuu(PGvh0tR{D;Q98d;+Z4fS&LDNK6>Loqm_Rwkx%4UC|vFr ze`?YIBR&YZgYvy(HCdA;4dJ>50;^mI8r+XvktL6D?T@Wp5845}E`J|Mrxc z2mPS<)cFx1Im_YYnT3K9_C4&3nQ8jw*e$oLP8WUyt~mYrT=cO%lw4vL#47e%M&EFY zJ;cD?AFu6mNU3wnt$$K!cS(cJ$-RMp+O1V9CtdrNa?e=YQzd@5 zbq*o74zNMdz!k51$%EH8IBD`tHRSjDk~CO|7A`R2V^Qj&qrc+qOQDQNTGZN5o~nit@=lq=9BK$rMvJNus-X{e 
zHl9KcUv;i}(1r;OC$WLNUT>hzS&YlQfOS_XlAa{GfMQSVl&Ypr;^)sz7oVYiO|8MXZx1OM&zEC>kb*}jzzBpjF{CSds$5E)Ld=T z*;k)_yy19T3Z@c+{nFe(L8NZ2a!Pu9G%FMX6YO@r_b8;aHdGeo3H)s`2oZ4br^nz5yWVTeVUa@dLeiPSc8sqEAW~+Lt?jv*Uo=BGKL(WuWz#- zhtOIdOlCNy@mt;aR-daC5?0I4OO`E(f*NTg+?z`^BeZLTSX3rST9GYo#``%jpmvoz zpvO|#F2tC78*j)0ZSJYx4j>XS0-;;Vp!yeB_?fus+4NXL7W1b^XC^VQ?V}p;d}sgp zE|6v^5*nFAUYt_@F2%j#Q2Jgx(75DM(t;BiZy-uKNobB4J`5sXOd^`itgxhu+aw-E zvS+M1Jfy;2*5YKkU#c~U{BEqgEeh` zfuy&@*dxSG&Lfqd6l*_Bc^K;-5f!CPcAbrBeF1ZRAih8A`~0>8rvJ*?hA1+7M+6_X zZ#jfyFhn7ow44$*qq?*N?vvOi_n zg+4VsT^{u7iE!u*1MEDlDV`>(QFMU@1VxuXW^c-DvOUIKkm(S%BuT6Cff+9VON2mxD`ll}*|9qSzqAFUn`Q#nK>fo26b3X+ zN}1aNrVJ= zFCmySCh{0axs_OX#eQoUvj3AK$x@^8VdTeY#2G@}8G`XhDmSKs$0Q^v{0NwwI8F)V ztO9Mx$!$vr?dH9O?OiB9rtkqsce7}xep-W0dX~UAP=#BjB_jtf?1ebMpPArm=XH+V zQi=5bKl{aaY`p^pA}ef>I%!MBFF+dZnZRyW;3oQP&@>VVYXNre&GQ$ z-{lOa2~|i&?UwpUlc-N%S2twTN=5>kizOyr?*{#UG$>*)WTQo( zT+iZbVt}3mYiPE`3xa=;JO|0+yB6VfPQAn&wzggHlibRvm=xAdI4uLoNt4lVS+&C= zl#)s^VY(;^iBS5FX&d2V@L11_B|lqDucJZ|HCR;JV8}ZYISaDtMyB}y+{=_$Q8hoM z-%!rUWo)BN)!;dMNj6gaQnuzeeZ>Ey7@WhrwPU9f2q?Nq^!bV;rE8A{NAB&4*YBm8 z-*gLjR;F0_Ta_tE-AXJ$SI;h-;(BP5l%ExEBoDe(tyF=%?OY(>ML)L_FjOU}GA6MPHxVT!(krRZnxQTXBSp zrTUF~?V*$jD0!xabJrBb0;+w}X}A#>iKKoi1$+Q^cHL3i>IWp6RhBul`;TfGocV{H zZIkAj+t&V=QfT##BVnwfv9mW0uKedOa-2@}9Cn9dzQJ9e&jvyQ5a!S-=1{ueKzeJ= zC%}?OOAz0<(Aua4bwQ`+a8T>2Vpq=KdUB9+ki>La!kyClo=d*a7~9Irfp9#jm_M?_ zy?`95|<{hd&XTRoJ5 zdNE^Z!6A3KNFn6}f+|j{siqerX@M<}D!ghbSWF74K_x^K31U{>?Vxi2 z1C}*x=8DB^=9nurQdSYvWxpQe>m5{B7lvP30?sgbP8|k%8#^ubN;}?go(kM?GnXYD zn=R>h=Y`S92TQf@E{$p@@(J`mxPeobFxeuLzCp1LQU&2TUqB&r6Jd@Mpyha+@MW%Y z)$pAX_D{7)gjeXEV^CVm@AL{blrW6Azf>x|-@PZ9x-?a9$9Zoa2dRo*AV;w5-s`0B zi{}EV%TLQud3IwE>Z9q4e`6(`rC=u@6u@a-ixRrlT%=NpwVc78$vV}fQ_Y|5mf=!o zJ*bseE$dsr&JP$`k6XzWJoCDX!0*_Tq|0ps#$ep?I8HO#y~7uj@VDb+MB0)yMPiP3 zc;4tO;2Y%F>T?_3c<+A^lz(SfePI!vT%jb+K5}8HwENOdWP$||fL1GM9er(K|2 z=6=;;@`D4BG3Oa~v$Z@XsjtA#-{xrU(~VWSN^tj3A=n(s-+IKi?9n8t z<8^#jxMM(RcF?ga+}Z6!aKb37gWVtECjnb=>hBG8>}BopL}D6F*3+j)6T5zip`)JZ z7@lw47UN-47JKeRjlq=f<&hUn2}wU@wDAk0jTY+EO;7>IkF7kr)W-nSmf#MgjQGOW zp=NilJ7*-EE8oj?^`&T5@0WtjTjef{0k*?YVYxLT1A2jyE>^(6ZENW1swx&H2Y_dQ zi#L0=SvrCC?eEi1UvHU-y7Sa3n2!_s12>_JX^SR z!buXuFe-WiQf?m8G4TAB+T*f=tvqnfev(7-);__JY*mq85w>pa%n7m$K=`xo%psT} zTqz+kH&qE3oMm_a7D1Oe&&UhS%%vn!0Yjc76Sa?2V)(6N}?8m@UapRt@1qfdNf1;H7L#I zBB9>$RNyTiix@zI#f2V|#ny1FTIO_ua!B-4zwG%Z2O6&WJj}XYDlr+%u)#3v2_kaZXJ*pSK&{SpBc&%lv?Ddl0t5RgvE3b9zE*h;Ir*^ z3F#Yjtdd}=gzBHMQkmJ+aURhNkFQ7O&-JF!O{IN{m9Z?_u9TvIEtHZ~`P8+f`Vru= zUf9?2NtLK#^4HceZ%F=a_w8Ipb^o>Q7aEb*g;znGF#}9#DQT0rz5U6HRjjEL%Eehq znS-iqtKPl2+k-LhAjk#_Jfbf3+fs&i%1`lSR* zm?{()L5JlrkXnybT?id9LU_*t@;nBD+I+fD=qhq0^dQR*cf?*u5UW&)Im3pi!yxTm z!EeSrDL+Y*axovvVn(;R>yd)aYE8GXsxY{7l0->5Pjv<2hGbi#Op+>udrRc7>)A4@ zyICjFghS!t8h_O?Ygo#WBa3!@0~_RpTkIzq+K;%>3Boy-jZIm*uh;dng9QYWe3tZD zNIo#d;<|PhF#lQ;%4(TFR>3&JFt$tPAE6V3kSLV|47Kn3^vmidb=H;>+5Imd43!8w zOxpOs0E<=Q`ewpf+{O`eMr=Qjy{ks9r#RKI%{Q@ud&&Y*BzoacU*(_{)gvrZBaADm zEL6qB`{N*TdS9s?y-~rWB6n9xdnr`$YYVu&Wfx!vyT5I876>9pB(K z8djYIsge?)1WUBzIP(~M*U3*|qArUdA6978*srcb4*?daB#P)Q!HlrkWkQq;5!$RMn6+oUgz7J!6dFqI zwb}1ejq*LjI1!YZ0&(75^W}1m&|S-u$zsi|{*sGLefy9UYaFxxIFe6W=CCMQLZB7w zh1hlfKV|AW@`;CUo7!$CrR)Um+8tX|3PesV$x8+EP=;fYXuLk<@C_B)#6;# zd=sIZ{ah^|lP)ZS3xt>ywt7Xy0}DiOT*n13CI*`U+ECz@rPF%o2*9EXz}mY8j^bvT z)z?WiUCZCy`Z0!Q)$;CY1F?KP$~(GZj#OcUS6RdAlUCZRoFaC3+B~5kC=(9s9#&0& zNiO9tA*2xsuJsp=qg5vaAnMQ8gX1}0-1*ZgcOh4<=^3(({Ks>ZH`6#ZH0nc7EZ>-$ zP**>sE?M93U|aWg<1Y+2{l832$FFwGy4U`J*W0e8H~clte*S0obW?{>Ym9$AoxkgE ze{+-OXBPKJpB6ZGTS3)3$CMSv+jt(2S&`PUnmfGdabiSh?z`ILS8u$W8X+2$I(xLqE;~l$prx-1L(Pq@&uVi3lQtq41$bkEWhC1xs5I^~oUl>(U 
z{Pbz)lUg(u151_=06%Htj=E|_mETsxw49bk)1ntgueUu0A3nF+a z%9EZ7c@HAb(R5dVoK8aCb$}(Efikg;(5v{&UiBOJ!>6#ZIZpEt&*v)r*o+{U_I`Td zrg7365A*gK@$vcrw5p1@lzL79;@s1RHh#;q{*A()L;RWFP|#2W9NE_c!6fO!kGx&WOimgI$#sbtSMWF-yO?_Udk_|Ls{P9KM8cH^?3aoVQ()hkQp9$`LZ($o(> zH27IQed}Mk9{lGfM{!R`{K$Z1^gQd^7KyemI5&CaFjRN1oc*iuM>Q6Y`OnH^EJTl@ z3eaX22ZpZS{%JdcgCT?UlsyPQGGIWL#*e1fHtxZOzj5=&w`dI+VkL$1w(~hrP>+8> zE$&R1Q{!he+7|tHQ5oJz>lXm{Zu-=3Hq3t${g?kPYKxZt*=aueMVsQ}isjNZ%p(|> zCZMqhq@s-}P{q=QVy67+z}_1R#0)jQMm~|@23TVuu%Vi&pN3{f6rN4yQpj-Hk2g5- z+J86d;j4vf2g8O9XB_nu%4D0+n_KhshcB=A^Iu)*JOv%;{Fh~;RX5=39KEy^D^}9GJ!G3rtfMU*Z!v6`=1|eGLl*I_E0( zwd%^Wa!=C4&Kk;Jf0|@IVL#!Y{ujX%X)bfbc5_zPRHDc9{34`wpL%$?HDd#(c^)wOLb<3}1|E{m4);l#WniMGQC?(lj zRo`BK#isx2CN0q=_JgK2q3@nr!!d=D5Sg()uj>3Nxeo4m7U5yrp*cuRf?xP4WRw*q zG{bBI`RzV>;H0g;ufxgU@C2@|qbK(w+zg2;y>^afo-j6S50KN}F%1$`+q4G^5dG;% z@R6!O?Pn3$rs-qkY6%=g#!}?W)_!>Y8g-pU1&{2(Zn8V+Wevu6k@Q~Nt9r&sti$5h zH>k~A0$Qzj=J9N*^}(sXPfT52obg>;(JTyMBC_s`Ws-s1p26SHhqn_&+$f?(84Mg4 zQu_LNk;vXFlpvDzDWWK)%w(T_l`85~c+)?(c4PiVcgB_d(%E_YtfONG|#N|XN z0W$O~cCA`4Cb41a=%&J)kYSN9F87g0wbOUq1|x2UYotTUN#-^0qG)G93hMd#&Yz8Y zQ={XgNh*y%R%lY#*zcr296Q;dVgoWH&EKU5J%$+>DU<}`mP8$Au6=TNI{zdJk!TV_ z1n9mx7DrM$fi+nM1gH*y$n^DZ(s2}n%!p+2YB_uQPVN{ukdHV%K{{(@pZw_gPQ9S> z?=9hKmTVK499L_>+gf(&)KA%m*;91{CjwC+*E@+u0U@3-JSOP(iIQG;~Ml{j{)kn$rtiK^|^;{jzY_g>ypwCmd zhQxNsK@Xp(ZH9|u6e6C&&o6qtjNXU8=oRTFdZ{UOZzHKyMS=7ft3;_)RB*ALbMBlx zfm|Bq`Qp1A>jbwMNF148&d&Fnh(u^w5fP`;9H>P;t0&H>9?vA--IB`d9BBt$E8jjn#DUrDvm3bQj1!y4}sxb1PHshmq>(rL@LfM=`X-f3()k* z#Pf1M&>cIUo<0U3?=s2dE;QE%ZFFIdDtC(j${>bl$^=DJ0OwrVy$b0KkGC&piy$s# zugRnbS7=4Dt#&7_a~i)aasKUZ?F`gA^I!_9l@i*cktE_qma+D8YX%<;2nYZ^+y}yd zrphG1>4641jSo5X&(%k#!aOr5j9PQjyEgHceA#-ShI0aZC|~irC~AYmd$=T$ZjW8-P++7%|Dsbz?@2k}%OGdC4K* z%{eW{4K@4q*T2{@;ow7U9O3AaHAc$h^I8tS1!z)-?WgQz9G_A1ufFQv{mkQ1t+>)y zwwWr7W(TZ}sgOC`j$SwpsE2orM?=R^8!026{S^*rl>BTY~L9COkghwq;8xp!8ZU)urapKYPl$_6 zvE$bOkQTjuHVpvW+gtQGpbsw3PwC$H95gzwKqB~hX7o8T;p{%NLAgczx?(m&tLAoE z5ol#yqY$=-7OF8AKV%=+tFo@_U7Sz%6)!S-s)s?h-O>Ui; zeq|b{M1;7lz$XUfSN1!$oK;VfTV1#kGK&74O32?gi<4@DMd8`bppN!2EwuCG$lxp( z7I{DzAs`!zD}or@$Dx@h+V4moQ4tJBq7&R@Jb}V*Kr5)w>d!Tf`^EJDbhKiAMW9uT$YPICK3*JsevH2Al%WB(Cs64dxQ8N&gK zarnG=3^K|{CZ9AC5;c4}5GP+hCBrwEO`uev7fr5F{!EX|OAEP1nWpg;u1trM_NGM) zf%>l7Xc5_@(q9)28bTZTuAjd^D-ST#{t;5=&z+xf9|f8t&<#aAp6TOgg3FX7RBbKU zxfUtjD?}p?!|JaPnaCx3680QFetZ@o!ZK=YTgdo_kp{M7itEIGjx%ANV7R~|n*fWL z&gpd|bFQj`7jtzg1x_QW|>F4RFt7-V97)b zMY`Q&w60H~=&HxuB?J!<=N+#yOPv4eN#T_DKJf6Mm2)QNaOT?-4mMA6lbKGQ^&Qlc zReYKhAhah6e?Tgx@Bk(;mgP`1D7Far$RtKWt6ogE>CvMhk>HdrIsPo5b?0WNWXj}# z6To3I!A3C~%5xvLYVaK;w z0d}u4TRR-$_JZ``lCF0|gv%Bymx%Ys9_Cq|n}~^-QEo8ie9+u^rO4MmSox=vrpMe% zB+!#6Gkhtq@qC7=yxQ;_S1@DtZKiw;dYa3L%RFQ7Oo7tUw3@XyP~}n$s>sK_Aw%~{kb5s^H1AI`NZvc$z!u5H<1r%`t7${ zNY@hG_Il1vsrs{@+E8ccps6RKapOaW+tIDHI$NJXZ!&3t6KIZSH8K7Gwmf?V^p)Ww z<4zW@D4I{%=Ovi0$buDYjaXSK_~WXfQn$NDM95PU4$H7XeQK6}FmdX3YHZZ>Oicn;L2?M6!v+m$Ah`H1k7xG&Eg z@9bsjC~XYyo|H2C7?GqNzqm)7+;HXol?rEep|mNw+#zHr`nC;_fjL;^vmUFCKoNMi;See69JGb3w;$uY3Bryhc z24Qj`3T&s}=xEG|si!`**4{cO<%va`rCK9Kq^R?9&s;}4TieTC-p8IxdEN0?t1oXw zMhb|V_Mts_#m&w ze#w$0PP6ShbqXC+8lPU3R=fF+cSSweZimb3#+sT+8q0W(C<0!IRcO@qm*Cy4c&r$o zrP)VZ3llA%?~0!Bx%*7bh5m=y|<*2^drJ!2It<`Is3$pRz2;b>^=; zrVg)$mDG(rRlD$q>h$piD^~usdUYh1ej~v>Lgoj;11y7wmrr_yvfbRI=#^EEEY`Qx zm08UC-ZBCrUnb&Em0Gl7Y(w)+i|zP@IfzNiAfBhMlY+>HWQ~BbGIDcs{k<9&S!3+; z1xLI;GtnozmcUpgdIRX`-*8QdH@rtu`*dh-bUFj7I@`IlW=s_xUZr55k#1q2yDswD zMbAl+o+LV8XP$rgY|6&XuD-5*D%EA9;rM3yiO(BfX|jPwJh7sUNTRoUH+Dq_@t_8o zwR~x6uj1reawdpg7V}Fl` zh@i}=(>uKXBe+cQ1y9P$wS3PZs1AM3u2iz9=f~j|mD#7@zSLQ9AA*O%^aqxp 
z7eK*CAf;o5KG-wxa9x=XJL=zU+xnC2fo09D9P*3AWhYAo6>p}gZkLDD=x4{duWYbn z{uOTf*m6oedJIq_Wx8^5Q;a{QH$-HNBd4F9wOSf{kk@^tMfxe0-kD^q%#dXFJD}(f znBb1vc_j;2Sneq0KDiGFs>;1syL&rZ9ljg9bEh^*YJS_an>UZ*%kPxO4aU)}y8&uZ z7*XFPn~KWD*SdBc{@ch?Nq0%&`pt|v7Qky0oiiNuO4u!|CP@s_u)rgSf%(9AKyA7{ z_!Hss=gM6?or8T*Q=lAlB` zZStEWuNN2?C2?!-8o^{@FGtGueDPBZW!sU}#`Q6zr!c0h22cK41_@wn&E>TymLot( z`b}o3*t2In&>B;!={?;_ZC1sr=+h_U7lWoRA4buxy1JTGh2bh1?nRe9rv!o;p&Ef_ zHw-uy0U0{(xa4LxQv1unWoIq4MckcWgZ$$vT$&7diRxMjj9Ux|o=_p-K?bl0B1wmd;mgzCl8oc|Zvu7=&_)*}7({tmqU%i@R zN{v1D?c1<52bVg41K~=iJ0XW4wFIAJ_*@n_#y^kXJ%JxBr=MfTMAX*7A+Hn6)1Q!D zZLRi&;CjyG6{h^RVW6i_^j$eA&BiAB|;5$%S9J zUcLYIN0yE3dt|IVAl=48Lj8|}fs%~M0MY9f6;(v}H@(~Rx~?=tg?TSsAsQBIso87m zA1ErUSEl=OW1H^#v$SU&XefKzEn$xtumw5eP+q)1PN>f-t6PMUfFjNH0gg?-o_!tY z&)_avt7zK+%>q6lUFD96Wk=FLNIH-2gc=Rqv@rAa3XHIMmC#aH9RI8D%iiSHj`961 zLX+@y{0rNnbkdc}m%oF|urakT23#;>`>t)Y-;ry9EpQj27{7>kD|kF2S9_39=IPw>K8aT z2>6G47@@D9=(6h+vqU-r)I&}u;TSt+j39&F-xJMq^E^ekMhI-w0?WO3=1r6bNXBdK zYK@Z&|J5kk_vIVRHoTxRvQ5)lnVCW@%Sp8EOI!>Vy*BCZxahU$N`GaLTsPPC>*;%I zw>Fa<2>!HNg8UYIrGL{s^?Bj5WD$+A=TGCzXX+t**wU5_3T74gh1@@~p}b-@_P>t> z4xv11$-1rHC2`k5pa;fc2PHI{ut8*NC!|It`OoS-+P>ZHR<|7>w|arE@>37nieao7 z)YR@6`qa(V`0%``OBnn7^0jOID)IkQfqLM$Q;(~0A_e+;83pRQE>^Di)t}hEyZr$?Bx@5dvhYST9vnd%y?q%3iviS1K zwz`fzd!D`f4OW;GUZ01-3t9l_gyOY($&o;v57(S4jV^27`qYq1+PRU)SZ@Xej}*`H zxNuds%ibA<1I&pu!>Mfaf3))Ih)+PG#r|7rz^?5@e02jeL9$ALE_QERW`16ef+9GHr z8M4-I`NfZtB*)F{vvTqdAW73>sQ&;P>Bekr`&xA2^A@f}p}B>XFzsz%F#9tC!I4<7 zexnJ)xT)MqKn-RP+jtz$BkU6Cy)-23m3b1fWWY;EP{4Xl6y~=IREtk67}T-1j(jKe zx~4N^h=58*=dtnWCbNcRVW*|5Yo#%=Ca=BNUF(FX6#HFOjf8FnqI=mQt@3?2p}r&^ zSm`9osa*~YOY?R==d!ehILfzMuIM$`Ol6mA7%w779DOT2Jq91ky_C)ZAVg}WzZw;m zs;a8#i$05IbstMOazYsv!be1IA9OQtx6q*GW#{$hE5X>{C;69(sw>0@r_t$6ehic? z57 z-w+1sY&$`Yh`RW)6auX_B?5qVd0KpY35dA)o4mYZd~@_4BS$6>2*B`H(;Yl@RW%%N|fZXuC9)vks({Ow)@6~ zf$Ip>i?)%>1Erp+ra1Mps|qc-_aqPDpA8!RY9ujPEL zUM+D+d8FY)zLU+(5sD-fpAJPcu9y^;plCdKIK46S`C6aa%Wc__*n_53vN&q#1Sur0 z`}dXxnQ<4n>*zKTQH71Yfc1&`(qg)V{zK7TmqnXWoX$ zNZbV3RP(KC9@juT6&j^1b#f6wJvBBhHK=S5p?9-SI_og~LKGWK&G!t5UtMI)jAvOa z?(vinvLhi7DiKsY;H%jRy`pVcH)222V-$)AA3PsM>Czw4ePhG7#$;381pm=e!@%)- zTN%s26Xs2kDdBVks@)9!d5=N>he5jokt7-0p-CJGQC7$Q%F?^!Cqn4;_{6UKuZ0U; z*~lX7jxgqz%uSZt@Vr4(%<$3ecVGYshvu>;rN2ZVIU|y) zjA>E-K%rOgnQUSDm~z*z$O_hijy|nBFs@s365kUX0 zpg=I}8+TiX*Ti3d9!$KK=oJ4m4GJ73rrzVEO@($OdfgPwM~ge$EJ)C;y<`p%<_-=q zFcLSA_WHFgqq&#jG0XGsN67Gp`l(bBemHzeto;}zOn$i}Ss&vmz^bQ!aYA{n?sJ#k4W-dNFu*hR47wb*hS=9Rrq8qx1?Q( zmmy`x68k$WxsQV2tw6CC@%#F2xBvz}FIn9Lycwk( z0Dp9!bYJy)QCoJmr{J}!VNp1F%qgDAa=-k9D~38_HOrYCF%hIx@5MhqUv3t5=`re6 z;U&+mUsvtjTjVX`J6{|-etdgZy+67+!ckotS~zJ==b zXA9|bS0ciT{Xi&fQ^3A`S)ksa9^q@$C08trjeVg+SVwi(L1syF^Q#9BK0Um+;O7Rv zh>Q_J)q=yL)~Av1Med)iIZ-bl+)jHZm z0=~EqtixM43S@qgm;W20&j(^-%dxDYRQd_@r^m1rq3ny!v5-^I^HI_ur=Wj;{T(UW zf6;#q8IpmZJ2;Z%T%vDxN=NvoX?h(f3=#9t&4J^yp36N7xhpy}oMZdOHsFbYU6r+F ztN;xrd!91)41&b)nBnYL^4s0|vrD1jF9VE7_bkfIJ?mC7cb4<|r319IXu2Dx7M8Xc z9Dh`isJ#bnpdJAIk0FogH*T1iij0r<45aved=vMD@I`d$M4x3)*12c1$fxBO*^_5GPCyr-CMdH$Tzx1=a*NZTejH>QY>&;L=@K<@2%a9~O?HY`{WVGaylIEIo~#>8KH-6bs&AQm0bN8+@m z9)G}VBI?_vm32^Fg2@|7Sgb;`01<2O9z1eSAQ5&4CF`fHf9o}N7%?S~CMn;^xbrbF zwGdhQPS`t-D2V25kzT~X-4|UgJLrFhe|7(s-#^B}p$yM}IEH-{pnQJ#rcL9>=7ja2 z4tug|E932=qmEqMn^aK1)+{qcPMF{`sH~mxZ22?JX^E>ORR`s`M>v+EgdxRi{@Z!5 zV8(r1)HD!w^`AB0zD3VC_m&&2-5~5qSs9`K=FQh7acyXzr@Bq+z7qeGBGd86Xd`mf z+-20Qeb}3@lT*f1Rm6`o{{1Y#lh?~O#-7}kV1)1tbk3g0a0=c4BW;G1gSYTmLZ_c0 z>jkRY!#E7&+hj&2KBW{=iYY`BX&?a{k+#@sYx7?EXpUsj0m)e=REC3c(r7Mrov?K^ zT0cFNjkgOS>vb4QH9CaF^ePNhlngq0W@c`V%1IW8Fd6SLBZgrXg8c|_O{b^HY{c8{ 
zl9&y^HLN(MrxXGWP5c=>-BzsFt8Kp-S`$<9b|-55ykn5g1ViT>12*oDZnOPQpB65L zX3aTh8lZCts6nsk$qwz3z8%SMf_TIa*Hu(fS_zYYL=t?u^O;Cmn2_DX`a)pMNclb2 zmgn;!3WkzI{%I$$3dEq!NjJN8e~mnbU1 zE}}0E!@0WnWz8I)^C3TvOMkjQ5qe?HatUr5<`$8a@kIKD$(=atqj`R)9{BAuJbC>j z{3xyp&__*;Iu1ZPDxd zajol`1Hb)zbJy4$^`LDeCx+{4tvoufV<-cbGfuQFV+HWz{3*KA5Tw5rvi_IcS2(Uh z^lBEdbCXYMkM98o>Qn!8+o=WGNhzldMrf*dLJvQBQ$@1mJB_)%k_LB_ou>7_0 z5#vc;aQL3+D+yGB2G`S4RD$Kn{0Y|+NOXHgJkGY5FlkaprGW#Zc`Y(bRRsEk91&|8 z07)*Ho56EJn>>)5M_kJ{Br%xV_%jW+#$(_Z>X-;~DBvW5m}fkVTL-eg7r7soIK!pH^b&;^M^p=X<) zOKE&@7c*0-)11%Qtv*GqcW*ywNWgPWZ2WSTI&fPcDr!4H{D_x>HKvY{g=_#_qvgpX zw-VC{M3z2>fPl!zLI5@)(J3*y~avcDDZQm z=aC5Mx>I-nJdxrK!UX?~8)q?sPd4vC(yOW0?9S5P5`Sn4Np*osJ$u2shIF?yzvFh2(9MJ~!ZMTsrxvH?#o&ZsZ+_sC|8X@e?wLkR#Dyue|ofM^`s%-kj3iY?Y28 z)@s=B#@mxVz34{3?TT5m))&sv_vJjmUV0v#BE-E1UJ|%7?-xdfnl@t=m^~%3KMu$1++65Rx>}|~`X5sZ(*HHYpgHgRA#19M zLMUOw&&zdee7U~fG^zCM8$r|;kPMhlIB(Nnyd-wNG}^*}mCRHUqi5)uQ=<+=+!86j zWXw;W#*HK|z;#Ce@53$;DUdg%2-uqBOw#*I?$X!=^&^pq#k_(KbQCl%=J29@cv5~s z?>9QlzFIgQ+wY5du)Kw0R5u%SAM5E^M0pNh1xKR^2)BaNMK0KZ7h?JOda0W)lDD1=^vWbyN} z1&yzp$Xp?9DjkH9O6Sg>XY9aO&=a{uD;?SRZTO6hcp{oj!70w3Ev>35d3F~HV_y+Ho+}clK$raDbNUj{;Lp2^ZhV(Z(h64z zl$&IFG|BZUc(YIsrpgcCw=s?O!CZn+lz$dcxD?l9r%h}4oNtT9*NlQ`;RueXw!q2h zDouZ#I(3qtf)g7WIiI*2q$R+K1JxinQJ&}yfmMDAtbVd|*^XQHQH#I^k0vqHBIBW6 zF85{@yY^)_)b;pa;JS&l)KV=|BQO`y1LLxPe)ZNyUu|d1qYxQT6BP*-&88*|ahPx8 z_fi2z1*ALy!4@UD7HBidhkGWEnF9Ma{%k#{sHj!bk|SM763#Pc&d66ic|hY8s$YB| zo80_cZS7`(T>!dM84i^nzNLPR?)3X~U^lWNDKQbl2dkc3SZ*PWzy9RD^Z2HwWY2Kw zylG5SqJZ_9q<SPd2Ne~ zbi@P)*OInjU}~p!*>)#8I~zeb@-aFmNE*nQK_U8E9y6ms;NgSxy6-;xhX4_-k_M5p zO^)pal&7X<5-F&lAi%k+b2_nb2seS9gdvdN)Lgoy<8BbRfSezH>2eAYv;P$mEfkyY zqoB?TQbY7T;B>IzV4(y|iE(1$an!o#2!PhvKc$y?Z=sNVR~mn{hlq;cGFulf;dd*By0c zWPFK<^#|6ir-;)BB0VijyevVjr|iQ`Plfx}u(#@#@KUJZO`15dgOW`4ixRSYEOYi!Cnry&4}aW;nntAV#l#RopdxrM7!m48l@5%z;NbP>-hG*);}y;- z-?u5iI*R`9G%zDV;abAJ=FOW&cx<6n;^o!V_fu0-&wl%g3QaspkuD#%B{Jk;ZaG*L z-6199EzW6S{z&@qJuN$E9+NF)@)Pm{J(?cF(WOO4>cT#iL`_*JP@O4FGTmKCcOh!K ziSimHi|Xp?&LN0u#jh&dDbU-Qj`1DfiR+Lp0v2uIBqy^qpM1(f0o&EJ+}BrPU+ces zccYdLdgfM>L2W;RHa3XHtr!qvx~UzZ?kk`@_K@t5^h(8rP@SlK_5zj`hk18HtnaHQ zYgZv!+~_ew2Cpbya94M(X)q5I{u`v!4O|bVL#IxPFIQPt?X=1qAJjqpN6;WfdC;q zUIB##sa`0gb@tYc8*hX4zfah+m~hUi@nd@~JhCrtp8Cr*s#HfV8gHMxV< zj8g3S3;NhVgGeqo$(2w zPtVK1@6@2CT;i3poqw*7o(T4>xSK?w6mWAYcoG%b%ViV_eIO?<6{S=1sM>cNiK#6J z)mjbIvIsTaAH0zY6^(t51e*$sY`}m=Sy{tzY0`P&HdkxqJ8iYOoJzn;H7OVx;t)NpeIe9;!{591rc_uK6R^22iDE&;jb!N=oO* zY6v0Nb)*{9DJ>GUX?%*Es7z_a9R1(JbxU~#EtS>Ja&JTto6T_JbL;!1#8R|7v8I{C zB8$6g(7WyWt`{)Me>|z=!ASFmdSU-W+`v5*W^Lx0H)RaZ3AT0bK*;J7%_> zN*N+pC??ka#Sps_i~-I^c!y#{i?(g2%LY5!*`m1x%*l8a>Px&wa+vWAahvgOL8H&> zO?JdhrzL);dy$Nez4C(dB#JCJk=m)EG_o0$?^c&^l=JroJ3?YO!b6i>V5jcwEWMHg z*)(H27>54Jx3`ELV%JPY8Hz{>|F-Jm(@m2%h?nB4gsUgM695kZB!!1MT2#L?%G)rs z;9EWY#u?*AJieFLxyA4E=g(4ke|h2`x(o6TmOJhpI^kK^gKiyTH@kNlweQLEu%Qul zlUy&(9QR ziext5{ns5Z=rvD4^riCqkB9P&f}YZ$rl9BLPk(26 z2*tU~HTR-p7~ynfyhO5Z9n`CA^Vc6gd;kSn>+X)Re~^-5-fxNcrlfwKWHo+D#wQd_ zCQmLAx~t%wFGtH_@3@#%c>~Y@=tF@FzCn!Q0@j_3aq2dhy?TouqsxKxa=7KpPbVng zX#~F@uxX*yQR3Ulb;SyWR;}VF(R+D)ryvs`#qp9_Qo0zfuRlJ`X6n=@fEt@Vh$<7n z=#?;hij@Z^s&&hj_vx0`ahQa6wMyMdLPSe%3HrE*WIjb{u5uIvD%E9xD$##OP1}Qv zFOnRN*XSbKZ!p<_;uc21L}ApZn_};;@Ft$br=4Gh@mjWQ&PLB)B>Bm}5jStXqiS)H z0q@tOO(ZAG*$NCJNu>6vph~eCU`SM!Q|2^)A^o=JT#{Ji&zv=D5-LFo0(yf}8hIyT z!oIMmsD5H94E+$(wi$UyYaFF%!SIJ^q_z93E2}gEdH)?8Xl%(`RU&}0tzldfY`>>3?fKeM)A-!KoXp=xT z$@J?SCcaHXHv7`~cgBCY6~_VDmPKk0W>3>S*5*Ew)0bA-+qTW85%lZ*N2 zEDB?yGzUo!q!mdSv(qd56)|_3nwqtU06z*bWOFUf@cQGAqj(e~BxWDy6+}l8$zNr% zgr_Imr#ViKrPjy&cQn1pRKHQQKlY0IC$K6iEE_L0VY#UmH<}M7UNtm~mnu)=5{K+7 
zVOaTs1z!$^rf*fzVVzsHXu+T2og6*-ho6PIGR1J}5pbo8ut#u%?HnBiF;;cdM&%_N zjzmWf%y>c{vcsVpOrhL=^mNBASo!q7#r_#Ju?=dcyV{()c=4HC!p9LyycHzIMXg%5 zMgV;Smk{l~UF!U$SgreRic!Rdb7(+NI1&}bsGC6!kGfsi(r}>T)q~8#Uk=Ir^E5UI z4{s81_Uu_?5|ihM2cPT40FDeO(9=+Ty$TqDIMgaPx9BNHH*WQRGn|gh?mc=8@Vg8h z0K=OF5u(saL8z)On&ktmg0;{ru0#wg9tt!9MacQ zS2xQ>*P{wY00}Jbv@9ITy)e2~Y){`ct-6n}Dwd9(&+WjJxuN70%&~FIynELHckSu< zeY1E9PX16YT^TsXUo6HkCd<3}%|DRV_b7%DM6tD1kZk*TbH%colm4pJ7)%k$yUD~y zenQ9%kA(vC7Ws#bKP1nQeo*>HUE6R=nwl%FH}7ed_5$_%gCkaNmYbhH|0`;Jv$Y>$xpCndbsU|uAfkN$_q_&)gZGyn3@CBP z7r=gu^u0-c&GH@evd2W5ejtZ~0UU@FpFCfbpI?c3bJs>2C&gr}u$c`%bN^y?f~fecT)89c5)hD3 zx-d0umL}VYz9)slk^jsRs3Tz?$JqRuGurYvIbOZik}0xJJZ{ryjTcWfHTB~rmE$uq zGDNdHCSK-%DS&D>+CHJ(iSVeLtOABUPs8B1#QGnp=tlf}b90V4{}LJl=<7nW4hTLn ziWoA*1)zWw9fGbA97unG6Ik|_8UmrUvX=lAQ=hx+bdVv3z> zumZp_3mqLboTtg&q`DEWDlQ3U1b3yMswt>3jjxrsa_cDT&+vi~sVax7fN_i(dF8~3 z#VF+ndXr)pa0~(KkfB3I;^6?81l<&IL-vUntdKVo-t&1pm?M0PZ&ZA+>{op1>o}$a zMpdljxPfpoJ`!yU<_l2`G>pytEiEjBBUSa=w{Ip>o-Rh44i}Sj9zE)4La8uehOvso z_oVtCe=LNlNJ7VQ(^@?F4+3sn;@v12vw;B-o;N z@96YW`+W7v6;a2vvAI7>7QRX}|K9%cSILPu#uxR0@#+UyL1K1I(=&>xHCF(_D|c|d zTdKl~fW3cgWHIjS%^Ov}0K|1D5B70jI#L@2g}{qqRB^^-DkYHH6c@02EKiNyxqJ6} z^$8{@=%)V_d2sri5?f6yCS_oiOnn1W&M76wFn+b{+o*OTuO>*Dfi?pCkO5y+oV#~vBgXSd>pZF;fIj5h!8FjL2F&t^D4Rpy7p?+bQ3#@L= z5=dTn8Pt2GeYaFaQ6d~Clcpb(0d@&HO?6pR_QF*}sXroL!u%`*pE=j`?zXOO`6(SJ z3OXm$j8Rz0mc>H|=#Ipkef;I?SEQ4dpBqI0EcOD(gu|jm`wtvQM6MolLn%dL=rTHF zDZ6(bv1&2PKnZjgI+AbSY9o_|Zv+sq_4zT)*0vrpcNcBDfUg6ZLie!Z>g*lfFoR@E zQ2QA%!jm`2Tv?LygTD8Bo`_{Vxf_#-UVA=EnNB*D(6vX8Hk&YEcI+aAV`&XMlzHEAqMQq$@AhoPIuAlSv16a-vWGL=o}{On_i$(XbAXBp z3pIjMpw*3Yo_!nbf+0hu1#fjPvDB#h z)@d196F3`Q7>dq*V*x&)3Sy1WbNhc+aGbvm0R2<{(&gj+i&lX zB~W$OLkTI$IZVj!QWf3fk^wpG zaW-~84i#4|2-jpz80`dR^<=oM4#BOW(F5T%<`XTbLD%QH>} zYkG2_*P=i6z|d_C;ZxF*0OFPqjp4&*Dx@J*XK1*s?#yMd@bgn}k2|A378cUwibfy) z+xBJ?E8rextj0g34uvQ`M%^1s304>RcSjQ~h+CxbT?P!eCgR~`*;d&j5t}|wEvQrL z=&ytu0A2~aQDEJZzX=XaPVgF5f}_{L1nq}GBSy?JH)^T6v2D*B8WpIMG;7ujJ}W}p zC!;Bud;=Pk>X1TV8_<2ETk&jFQwn^1w>KrkQ>V0mDMV6Rlo*KC4(?&dzTYM8`3&2b z2Pqo`j%!$uRjVqvIZT+J;*~2`gzu$~4i85eXcSF=bb@{;D!M;spjf&9fG|w=0Avxe z$pybc17kc`ena{s(N@E_6{6#OtmDc3Monq&v&kN$tLyc#nd9sspNA6~Ay-9d&rk8r zojdIE{=@syHpl@bKLONJHlZ%D05GV_?%EcDp(&)Ok6toSSO?FtC^KsRGJrs9k)R?M z|N3OuC6fRwv=3zuph!`z!5bf|yRHei7c`W~qwoXJ$RQ2;bDP=9`QetL?1Fb-S633o z?6OlKnvFfy{uetfn=lwufdu-Wdz!42&VS6#fchnt9qV2X8UH&B^*W z`Zd)2uv()CZ?;SnV@_gX%uywMe**sZo}oN94uoI}5tneKY0CPBhI159g`^CHB0~x#b>2^_*uQhmKj*C1_vO1~wT92oZh1QHUP=zvVWvl-_d#>egs23>f$I&7uyXR*~MdXn1JQ zS`Mju8bUbGd5#J!x<3)Mipk&MQ@S@Y@cWq}MI&1?+A#{9?$F{gmnXeH57Qvn77C590_I%4x_ADCDJPX=Dh ztEQC)XT8theYA1om^->xj6v+4o7iX-pZq%EAO78?(H?d+j`f|Kmnx45z(C`N;PO zxly7+BG?fVRLE9zL5KbHs|Aj9kidyLPSl}LQ_B(22(p20+QLO*<@IFSE%FN9=#bx7VEO6f$@AY?D9w=_@wsD{K4Hf1Z|a z0&k>|gk$&qz`#?ZmMvI72Fa|p&y+VsMW1VWiBFjNjAEg^ns@|P4qne8gyRfD%(^eG zt1&`@zfT?PxsXHynRv8nSER3gWsx`&M@Iv7?i3uwQnM@}`;D>bdDw8)ETn+h!W$hF zK?J+?|6(4GcAzVaYG%^n}-p3xj(=PyWBhkG!o|Q}b!_efW^f?jco~G+}}}ys4BP1#jM9 zO!5fxft*1D2hvupVd>%CAlyfZOA+Za5}YDcuim+H3Yvua*H*TxD9OZRX3~D-TVk|b zbp}NN#jBQ=$UY%OmB4YP6y3dI?yL_?9d~?vd%vFdo8H;GeIHFsY8hlyLM<(mnS3}B0#Ky-B*-eDTE8-%3vC>uwinFKA zzh%8W-(bIZj|`lceh5g3*pDOWF%EW`M|=yGm_(Thy}W{L-=ul-Z(qL(7e@*x%-R_; zOt!^fp?MnDQ9&$7KioTAe_ih+%$CW`P$PO^YegasFohg}mvfbFuh7BVi`_BCqi3I<=2ff z8_SJrG7xneG+S|oZr@%-s>^%p`hvI-kRgl(J+6YGM4Nv-6Ip50n!B%Gx_GfZy(~(N zqPK5li^`*txa){a-9VsVFi@8AO%JGEy?%W@WHfSbXau_w#wcBmxSgM$|L~z{)>y*~ z%#e)5Bb_`W%}fw^Js;(w#TZ5llTyz>fGuq3n&OBk0gd;rqb-{?VRQ9aFn|6)WFB1b zXUHkoINWabd8Fyd9j8x!FnzPOI8$&dF|TGjr>CWzY!1}Vv`)8|DI zp{~CFz=7o47ji@*-9#4T<(#c)*1BcOJ#x7ndW;t@=?RebT99W*rF&M-CSQdzVswvY 
z_`O@V7BSM@VLrJdD>{6;KS@p57l(%@d^auh7)2+@eyoNU+UXvf#cTyJl3GL zv7U4XmkJZikMuSDKff{Cj#rd@Yi5`6c<8qN$;W<5m(!xpoN2>B142TIHtvfnA4{n} zZK)gtX{6{_3H9+6jOi|2E~lj#1)1EkX)m5at*k}f$s^_jU?b6sJ@5h11Q#f`Q2gBa zSx4M%DP6gJeonsIx8t|kA92xtBa2*tr+YfNReiom;?j5U;Jpd80Ea3EeibYe0!^q) z2l$vFOV+WeY>N84_mc`q3hy&UpKsrNE$|7sFau^t{V@SMeDI*I@6chxptEHNsJ1Ry z{*TXKtr9LF^;xAh^x>gkKFOvwSc)Nqcz9YgW<}OLOkyd)Ydt{H zM;RF!?wLrjIv5C%g_;#QeOz9S;(4EnHGVfg!q2eGMMoG$LB?rG5SQBo!)_9gUyD9U zr(=BG1Pxxmxvfa$CUtf%c>n&CPoD6l$2^E?VSHVi!esq_(CP4fkRE6n5(Y*PD%wTI z#R(H?Fw5UB-#~Gz#D_bY>P%?j0Awo7J4!eEU}7K)MBF8*$4@85)*<$Th#iVA!+H1z ziywE-PhY;kG}$q5?vf=WXa(#{SmCMl3&K{H#JcWR#)O#5WM#CDD=RCR-e}a##l?jN zqcub&cFXTKVRip{6DfrbUlQuvcY;(%3t~Cd4ns3fK%@TAb)B6t|Lr7H?-Yjw+X5C-_}fUFs3D=wn>Vi>GaKXdpU-bcGovv^ z))p3zfNKr9fzRfmYA0)FlNLlaQ+(=OZ*TwZop}zWFlHJE&JpV%M4wAA#lH0H@r%S zj2-*U^}-rxyLa7{Pyg?2i{T4%U@?Y$(jWw40|a^G)lG{C^QkxL_Je0@O6mGLpfioH z2iMHy!+FXk5T~AN760ZLzM)LPK;WsN$}C2D=x87#9D)HpczbSb#=}>w>+WS}|L=W& zy{Lb;5UKE_riTTXl+`7^ZZFAZx&{Ag38ix5A8;9f`2f4@v&7~Aa2ofa?iI}f1T4$L zIY~`jop}9$TEnsRpZw%#hr-vdahtk<&nS{@*@We+8-hpRzI|_By&6|X15h$+&9aHw zfWXFE@_{!>fX}9j&iHV7!1QJT_l4$yQ(CUe_LZDLBvqz5;9a5PrFb$BI_||#A zf+?Iplsa;n)av4T-#?4XDp+aGlP6OdE==i>ag=36EMdFr!@lm=as3d*jqwckHNIeC z%fKb|nt&ZUrV+A973f*JlNj#VOqbed0Qi_uvY=@cWg&8oXao`$a)tD9@AhSMU}P@k z90NJ-YeQws3`In@O2g~Oky-29R(O1QJ=I&%qD`CcSNHSL_kk3s&LxQ9`=y;_CAt`7 zRm7l$c6LeggV@f?*lD0z5SwDl2FW>nxr75jo4%mQN#2&avKT7}CP z1CMXGnu>R#y148-tXXR6lfq=M^?m{^l4^ZS{BjTB2bEDwtg}>_LC}tgZI`-?Qcqeo zOj|oFF+CR7Hj)gQP9Pm;64T2iT<_SJm_!gF+HtQZ&AhaJB1sL<9|A8w0;lqdC zs%$9Apix*?_$SE8Q)(Y$4o=QLmP=heGqpu4b#)xCyXfeoW@no#1I_fK80&p_5nVCs z8O=l?cfj99ATZ~LFAQdG1`!zwkw@9ggCjvU%KmS0l7&rEx8GN#%K9mF78EZ}+f<0b2 zlvovwrm%z(6Pbo^0a6(OCUs^P#kV->{#T;ON^T2%hRvHN!Qo-ryehIhVe4Zz>&fHC zTuM6HCxe6i-3Q5f1BOt(b89;xH#nc2NIc}y2 z6@h$jt0xrTn-&TQRSXjVO*dWrLn(AMT-1CTkSrivhAfpIZ~=hh2jNw4{dzFvDm$7H zIYG6z#zfe>3nzR!Z-PV&*&0=;w*cu_1l((;B$G5HG9c`@M!^FAWTK}-)&#hm&X-Bg>!V%9Agl`TyoTDpks1(!$TIx9&~ zAKtzDfxEk4P=NTR4^Jfb=S(ooIWNP@&$t9tP|SIiJN zC&OMNe(MGypmwK>$tvaMZ1VB~PJBu10gm=-M6rfnJQfnd?~<}V%|=6Ur%duD=Sw=# z*~-{t?vk&fK_cJX%Q9}+Oma~3R;MNd8k%aUfwAW>RfIqTw#CP7-@3I0%{>alA$iW% zZruu{Y!sNvI^hZM=FWZ=$w5d$?HI9uWnTNiQ(R81>Iamg;w=d>IfbP`K?JQ?{$@BK zf>Lbi@B)%Lh=aHRY=R8R0;ewogVdFHF?X8?GoO!e(F`|&Bb$Z7>~W2cD3ig#MFkQr zF7eE5X(}2ZLqK9;9ydobsd-}!I?r$*#bqK*hvxsWVzZDlC`XeM6JrAj!O@%%>#Pu-?uMK zg0HS0lW;w-lF4LCPSApq1i>k5rlJxKCeQF?fB}T&(9RyBI;YBo^CQl@l+kujqrL|2 zjnnM;@GsH22-OkVbv3p56g!w1a2DINW2{>d)gg=e&E12Ty=yG@R@}TXDMxR2EeJaP zrDjh)Nu#yZC%R37`O4-&MK>G@O5`c4EBqphFr0qVPL+Yh`_X3m%q zkN|ZQ2fp`za}Q`0Y^)W@W$GRd=Dy;27N+CvO-N8=IB7f!1ex+;3FH zXm@L?GcSqKvsq)lc~-#*N;p_+9!{CK?9EEHgLX8y{am7-SXD z38HNi(xF%+zUUH{W(_960|Vp3c44|W=>$=mbAk$*L(k#}HXFSPJy?i+nsI$}ve0(e zN9br=r+o8LrQ8jr5^WWrE&<1KW859(CtOrVT0UVf)^c#9<9*$z4I5#FV za5FefI5x$BF@5@O)vG8N>1u&bLFfS)lqXEA+f;)gqIo_t0+FQ?#GsKMR2s?}RLaGV zqM{;GAm0cP0WpKfO)r1CO`7>WBE`F+pO;4Ofl`Km#3#Jp8A(tmsW*F$ExXjdd!Ihb z+1|B`qBRT^bni&q0g_%#!b$Iv2!@qYFW05e6d-<>b}j-Y1ljE4bpbWbjP#8->Ciq> zmoRb5C_$HaaRE{R_n8k7peiOVXIw~5u7=x&E%6zImx*VdF5`9(AQ7>|+8j)!S0}t1 zH6ycp@dS%a!hB1^3z(P_ZrfCNn_|ALYS;e#GqSSGeul8cjoPzRSW`ssaGUAne}Clw zvcB{_%9AZ0@i7}%E5;*5_)--It)npHwn`&ca4vIU!gc~C$Q``ltZS;K8mD?@ZOM0TrKekzz+phC6fDzaq#?bj2w@n8lf}$%b*h7tm9P4U84+M5 zuXRU|H)Xw>nGyVBl|^Pf7r@{j7K>DV#a;77vNSC4o0+wB4v1)QZlvllaxKyW-K8JQ z4`u$fmOd&RYH36y>&2*9QBkyhoMSe2Px2ZkC%wT^zN!7VkzO)ua0!}AMMj;gKfM@NG%S;2q)*Z< zQydC=cJDsoT^868xpLBq7GXNtoo93+k%b|-ss~jbjSi{uXYebKN_E$)>}*k#691$% zv)807OC;E3Z7Gg_kr8QmTt#o+w{vijiJ=4Whp1Elik)u z-?$UvKEoB%6l^7U)Sg|tOiKIw`{sg2&H8Ag&)^1=M+Oi2M$;;yryB6dz=~BT?=d`t 
z=6I|PHs?j#Iv)^o%OG84O%g#xs;QQa?odE%JKBZ9a4|G;*q{%qk1Z$$DLHINV2`qDQp@iO>h^OvP8c;boH2|zP7im`TR1MT z@rbo%9~JjYNoly)$J5#KJs`;fEks7;(nTjiK|trInf7km^2cY!PQPk;ElP(HQ)k^| zgaxKONqSp9)f={l9%D)P)p5p4a~@G|(1DiDE@mr`X`IyHD+d~6c4i^ zJWV!R#lEE~7aK==m^)Su6CJcu+cO;K z6Gf{Fm!xoYp#N==&)>ea%;`^m=|rCO)7!VhVsycD#*GZBu8OHN<;%Bn?MAK^VjV>7 zm16F^^@#R2xz`WFkd}ECYz0<0svjMQ;|2!n-bKbSCr0F=^xJamwi!&G+=dDt$RT(p zg9q^sBu4eu6JX1jLNXnZ4)K4mhGZLwjE3-lxfMBKk+2Eaut_83jd6R1Wg$36)ybqpn`|GWQzPJnN;`5m9(&eKCNn@e9k?e- zN1n?2StyHPMlUviHasw2df*^O@}~57V(C{Zlx@xEw$hTIg0D{aH&sj@lWc)0A%9p& z>oNVvr>cYF2c52%DwVeN*bsG?NznaI!(P@yo`DuXmgw)G1R}Mb`E%n~LN^CC(B^1j?FwrLg}J)998nOkrbV~684_l74AS8 zIisOyj;lN$Ro-K|Z$y4~h)6LUls{SRoFCG(r!?VVz@VmoU05i03S$i<0WhHG$nZ!f zA1ESTQ9f$8h*jc`jNu#SKCLm-DkzG3|FnqXWq=MOlB;}K`keK6s8S!W$NtPKv|=oy zYS&@IDj>v}`Z74e*yRrXQVzF|f1o?CRG5@jyfZ#9(4e;3qXb`?H3L1)!-)>=?I>Sq z*B-6h7|ejE^0G27RWeS!!8Jfx##2WGOrS;&_T|$oylVvqK{_Da;b0}TePa!Phm+4< zrsnDO&6=MacV4QjKE5`g2M$%CKFKBrd(umYaVC-QMRQ7y43sy3k)~Xu>&YplZcU5nhh>+ zpVcJD?SRGf(`V(<79|s*$4P7Lo!p|OC~iPuR9}J<3EM#rZ7LByT`S2x68iY~up4VQ zMG8^@MzcEi?CB*un6q`^CY_JLaIIlvpVM8RH3fl!ezJGdW(F9}3?hA0Df(Zpo^U*v z{nAoGxQ(y6S1KX%RhNiN4jD1W$ch+uJB?prH9q8e{y~;X zo)g)loS)GMks3!~pf&~?0+l1x5r%zVSUcToW?0&B&1v8xx| z35K6dAAKE=LRkz76}lfz4hkmWWjM;s8FF%re=AW8lXgh9(9#vN*`a?Vyffm1^jOLq z+EXL4J?y0;DFr!Y_e3|P&c%80?j3%eS)g-#m7+OTwHJz4*#_V> z05E~xvc`2jyP0uF&GU`T84nN^73Bfh4#R}Y9AEeEp8=qe-c0?L=h}x+wtgwh>s^F$ zZ&|E`3KCP|-H?9$42+G@a>YQl=Dj%WDQmn`Nl5l%1sOeV!M(sI4A|DVPOQd8(0u!L9ffddY4i6neX{4-MCy=PAb6AZ}W!zx0{#yx6w5}3{mzqXbo}JWZ#WkzYbdM`Bw61H zD3E&lG75xNImvs21{=BNlqr;^gx-R#x&^ zo)as_G9r8cM5w@N)2$K7`(NC;z0*kR57y%;fmo#pz>u|N;Hr@$lX%&H|tb2N?P5#rD6UFjS_>oWrp$B zeyQcU?4(X116iNF?zO8|=|7$tmB(?* z|NhsuKN!UL({=4oPYZP2ZQc{)fmdP{9#!&gp+v(qp~g%ZoeT6*p&Uyr#R^VF672S8I0$&dJTX;A9NtXu!%xC9XO0* zb4v1AfkiJ8Qhm-@BDXzM08$4*E&j|IHEH~KG9_x6@s+RJD|T_M@@0%1BKvuS92(c@ zLTvN#nSZJ~Bp7TrBwO&1+THa}rK8=$8h-$k7F;9iTHbyL33}^}9ZO!n&IaC~Mn|V> z(2g!7#-+nxPm(<)B_@(c=q*2_c(2Rv#t4Dwm?m^XK%3gcN2rn?PBt>~OMo$^qYoZJ z4={D6C<;yN0!O*yyz&yraWZ2on?VamMDzkrhy1(yvbqFE<+sMo(@;<^i0U-SM>B%2 z(izq^^F+k(AohQ}L!BhicGd6BX=ty~FJFX~7JdmR2$adJ3+(8F2M=PT3GfX2aeI9S z>31*Qvx+#`5my108lYC_PPA_o*mJwM5Q$RF>aMJk5J)M$xYDk>B)FVDXF2~^YUQ8V z?e4G$C~<`zHKMM>RWepcEyZtn`h2B3B_#Q|7sUBs(Rs+=!A@(}GXCZ}sZ9i0Sc`Ra zFMoLB?~bRsKssuwe}byOdr76d_kE{OiZgUo@d<0$MdB^~(@z~U0To;tf^-1x;8Bk; zK8Jb*ABK}AwU!Sfo-eH@3fQj1G+QgnjkJZLi(DI_@ZP=V;p4=`4zGZ51g;Y@oN8vCmPcHTH*e=+_d!>2eG}W&i4r|iBak-2 zE5q#>MU*9=Z7paRG8HkT$vSRmXPOru>iYRg{`Fi8leTsX7Kp*uaP}|IlB#qjhTH#m z+^qAcMA?^FFI!{?PV0}F*tKuwOqZ85xY_x93#$nY160)mEUxtKzP3fm5GKeCx2VQ)1ccQRVLf%EDXPlMNc=Po@`ccv0n>p+~On&y`VSH!FNE(6}y~Xn|MfofJ;7$PT@xKSkT80c(RegQX+5P2T9DcTG%=h zqQQecg^oli{cRlF*kS9F%D!&|mpc z__OA!v%RpR+os)z+!hX;JRtuK&{lzTfOv`*O40Sdb!C&u~Gx z1>OOfV2mM=U`&>sg9GU2N9rASx_L0TB+7C_^$s0I0Gz}>99{eCO5t6@6=B1(UtvK3 zJVqCoWjb<0WyX5M5H^U%OeboiKZvI{=sReT(8Y49V|F+dvXaJiB=Mrh$vT~PvW^oZ zJyy4W+L)qwq;08&cW4RIDdFy45Yq+Q1k!&d+Z(dvF?fKM7kLb}a2Z_Z1@99TAIqej ztWNMdk0Dal%<2MSW^=uwV@!B$uiuSpRVte3!l$0ZNV0`Oi4Rjl3-!jdT1K#vlIFy6 z1vR5@e-Ht&}q2h+usj}kN!vTpZw`;0EC-NEuWW{N5AF9J#k?1sI0%v z{P*QWl&tpe>y&yX{ZHJOR{Na){1Mu}{ww|dVcCSb3-tR(NBh(*h~Ga_a{te#uKV%Z zZT0Ip8CqD>gGJj9)qO0UFuP6N-Sc!@O^1uq7cwer<=n^dzhAY{n7a4YngoWdE}B^xkLAVuF9ZM5WM5yHDhF#71}ZnEDZC1lYBttXDmwUucpFYs zp#wiBcM880-hrZx9BTwVMf&n}r>(Wdiyicbh7Zwm0h-VHRh+YMoLc!mpnN3w<5 z5=rX3H;PyFtQlK>SzBY16!0fJ%vTHeHxu>S9$Oh!*LPVD#mn8=Tz?1k3VJLhGgADE z+3u1Im3-K)_C&`vM*EYeWn^d!y+?Hg%L#Ix{Zxs|4@XkDf9uY-cAu4o^E;KzD z@oaqKMvnjj`~-9hH*eynawj)eFiPMN(!?Tv0C&B3#lqje(Do0l77dp5xsem*r7DnI zN?a;_Qd0;~w7w1!8ZombKcZbPHI9&mAw^@tyjD^!#9YW=P5n!2{AxOdJXq9Je}LlN 
zrfw;Is&favF^|WJ0{YXQj70-sl_P$j%MPGqFAn43<2Vi(u@&r0V+u-2*2rp<4HQBK z)289TD8?8V?OqjjXq3CP3h#k4Ql=pX7jGudre-@c#FQ)(u` zQgx!7<6S7Q)7KUYaVEoX$zW)}zkU9^FE>=Q&b4OkK76>EGF9j>avIK|feOsWCnF!` z?h3SHUG5iYzV1d`X_7OiR&ZQ0(QV@GpbgF1Ajr77ywXBor~2FnauaH!I|z8v)6&sE)B`JGlOs@t?S8(ObYIZ&i>e zE)!XOpO5MRy{uZcZ?Eh1_`=G0EKoXnHhH>+Ko%NitvMDiT)b$0=UAVexw*Lr&z7+w zLf$PN3OgJxH|_O>a*K)E+kDPh97X3a9Yh`Ydc=q-QDLCMoI3SqfuAvSgx0+*O}_5P zM2Ntp!oZ-l#L~(NGu0E^4o)aan+gH(6i&Alr82{av+fp>>wO-nBv5%Ra8Kf+FRXDv1mL4{;7|o zGjIl<9wysc`0qXW3=Z+2l@$+84?VR{U6b1*y-%8L~5(vFo1SR6->PqM?CJ(WX;0kD^0o)Zc6$_4B5UHP|si zGHHL#;lZ&KUV(`m!KU1BI({?J>yx;EQ*t8{V2V*A7mr?NZk`QVjSe_k)av9CZcc&sQe$gh6Tko;9^`KJ3;k8 zkV^YYTg}I#=0`pan%?CyvQbplkMtJ#!9Y6&ahn#Ri;(C9d>&Q=lX#a)-zFNPaPo*R zGk;3-p|94>>wfL#WtZ4lO>a|HchA-1`Oe(cFBEJ|G_Y@Dy><=AOXc%vmdxe^L@ulI zpn$b}3H7Wb2z0Q|yu*sG8G5Wf9nflqM3wu9@5urnAl62Nr~1>{!FS0lNk)9jE<5hw zj5Y|_%aug6YvvvKqd)bJKnjo*lZDYqxqIi1;Ydav|5RzU_cn5AES5dA%_Fh|+*&SS z?E?<@&4tK7j;TJ#k!vH}wQt`AZjCfbtKajaKTWze%C*&5ac&4Sb-Dtbt^_ zVIE%%xqQzQfTO0(n?G7r3QWNZj8pChro~I1El?cgp5gUriISQ@!skd3*Oh+jt}vO6 zO=&Nh^FelVG^Ovay&XAnWZ}^M{m*7LA=KDLnj5X87rc5qu^{xQ>GRzS7ZvGO6&7S*&HSsQNs&AV^Ufw1r7zS*Q<{oKfZV|=Y3}N z=84B~uo;h!0-*>B8LBEyCX~zl>DiV#FIqzj9so|%oPfY!>40NT=}Ln?aN$MwjKYkf z5Mn&^9f>m3kd9eK&~lm)_L?_G^oo=zoK_Fy0cdMHIUv9SN>$S@r+xf(?_PX=H&C9c`SqE26D9V8LVM}m;^#y;+$wAtZlkv0&9aT_j zq|VvEfq)G=Aom%LG1yWV~i9@(1B^`Cvd`(Z1mb(7BQwr;Tu3facL1j@#$=B z4Z$76mGRTT^h#fv+L?Va9@J9IrL;wlN7NA$Tow1_kxZK3EKGA!^%%7Vn zDTVsqaV}w#2^>#H$8Z!Ans_*~qd3Fa?~8VdUOO^v+1o14E|Qm1U$L$1T(LFlEV~!i zhcOIdjiBH^)< zcTI3P69x|?DDmjuzZ6Xc=Hcr&-?zD6BCF+tFcIV?(JyuxiaICkq%&B9X*2zk%E%C51S#{MIfF-f{2OW2Lu>fl!wxex8^dmUDh-0w4kn zH;&@e{PD5Y?yPID3^AUCLrQn<_6BQHQkaddl^-MeE}95i$;{O!AUk&NEv zlrTDEyA>>GT;*0VkIu)6Vq832e#IgK*jxDJ3>-29IzbYV0Oq)O)u>sW+qTvAdd$dj za$fH1!uJCwK;RJ6xo{IwW8M^7*_#-=4jAx-l_D&(`M+l0-xkw*=EQBHfg%A)UCFCg zUs83WaAyTg4IJs}#BC>WVlGz|AJ7BF>0G0Al5yigj?T8*SL>HU;t8cmgsp?@Easg* zLMuw2M^EnqmY|?)CJ|Y*OOXxGtZ2^^fS|kuqhKD95T@O}-9p0n7*}{0ATi+a3+N6z zJAa3A)!@%R1+RDRn+n_t1@ltFi!X~TvQhI+h!+Jtms3G3Og6RXv%=0{rCuWCfGy(x z@Tqol!6bbpsTo)vJk#VyRmG}j8)JKS|YolUddUh3hxK4wY zSh7iwlvNXq=rz42_Bh14VA0R2?6q^$TlH%uPq+PXvXa`N;4%Jq-)5Kmk<+?W3@|uz zmC0FzW9#Edx1Nee!rW}du{LI_Ngc$ZBV2O~gh-^IT`H|-mfb7bsR2e75SZv&3J+ZJ zVHJ(R;7}k2^nDJMLyi0L1)%E7w{Nw%ILtN@($M>xVxe-XNx<%Kr(^y%W2TaL|BGL= zWlWF~M8*soZ^>tJ+_a9C>M9dRI5{5Ag2BUec$|~dYLFN=!b5HC+xue^(w0oeFoiH! 
z;}Ec!eku=6m9?bB(kAJs&^^$b4(~w;wR6jsUD6Vuz~pnB=3=s)%msk`0ym}NMGn%)@94_Uoaw8* zh%|u|pL3Lyq2N19>f+yv<5cw?2gYUq&hMMyB92NpIjIT4)Iellc)L-qSHBruqwN$9 zUVzn!kzFcGZELKLa@WLST!tM%+1IbQ!AZz9*yDUy#eEa@3+3ia>Y~=h?fWuj z*TKLwKvh_sIOabA=A`6$%{V!^tzch);d6HZhtu!fBZp<{q&_NgqUsd%^Z>IfI}&q} zF*h5wYxK2-w-f$FYs4p=eywa_Xi18vsTiHIL$C~8&oH1RsP$`>UCrdqL5K$~lG+Rh zGZv4;zD=_&6{y+Kt|fU%`hENMo%QH3rBo6k8YwC*XEF{qi~ald0bClb0c6oDXtxCzA;(~(iqishp z!FsLVCGao1g$rpm!_lI7=(e!t%aNK}%a_tY=LJyF#(^uzyVkU{Vs2lq*BYGq`1zfj z92d?BX|tG7xzC=(8ZEbjT*j|J5|C`cx)gL(ZO}F918UwE7#E@=0hSv1sP|}*v=j$> ztI~tpWt3qQ#I<&GLzeYyW1;aD)vmKzIE8?s6ojvauSKNZFd>u@1P#UwF8>;1Ip8dW z@Six(%%zuhwax!G5u_nm`7jD6stz<$vyIkzb%D)q2|*=IC>bW;{n0zpyE!HZry zkNM42zM)EhJLDO=`PfW!aWsC2*saJyIlKdvW1~PF%vp9csjf=B$>5_Mn6{{NJpN!2 zR3Pd}W^U0qFOc@EL$_Nut}l`eiyXi?DiGGo|adF1^0rc}Ktz zTp8(mTAn1=$9gbdX2q4^qZ=UKYSz4Yz)LcOR@SR}c`YKcQN5G}n%;ayeJc!1)sE<| zJ$=6)r!P3S&(^IJ^HlDRLk#ELPD{gz%2$nGaA|h?S1pS&!~ABnT#E^F$8KED6EjV1 z&BB*chFHYgMb&z=nFU>I^uG?4Z>x`!F~28pN=KlF+OXzDOE4*V_S(3QEKGi#-RMbF z(>(@e{ouSxb{^cf@0M9Pbp=_5`rbX#F!)}UK_?43Thd%`aGVj_W`%3N_tWJ2_iNuw zscxFl$s}`HEEx}1kzf6*Q6ux9({B{pM9ZQco7D*m^+|8ktlh1wf4xUZa!@rwNZj=j z2Ezb7z0>#CfdN9tn7e#A^N6AX^seEx7oQ(a&u6-wn?#wwlhJ$neQXrlw`dXc>}X7v zg$oyI1NKdvJ}=XuoC?@W6$_Mx4~3#6%OO^+^{KmA3zjbJ{q10{=G6D{dC%geA2~Pz z5&#_;$R)-%_s#ui%QzVh(t-do@ZL_(9L0$n)-;JcfqmBd)e@E)Rbs$P@>pVAc<657 zgt9*D+03*3^%?Zq;obK;Fq)j+Wl#MkbM(G=-zuZG*Dj-rFfCNuwhePn?earDwtra5 zb~trD%Go?f!D4b-yW~PW}kc4L&c$cm?PR`BtXxyg$sM8=*hTiD`!TteHY zLu;EEBIo2{u_*)k&pZ^<dtuua<~}|-xW8#cmgDqT#qgCiO|yS) zO}%?pJ~|LC^~KSRa+;{0TG#v4db`V0bVfh((1PMF>=@?C4ahj# z(vG@$!l6BB%T$)m?di4X$=MJx;>-mf!4jZ@rCF?4MHWY=?!x^c89aAT0f! zJ#P}%iMJ{1mCFkl(hW|^8INq05wheF4#2_`v)u5s)Kr@D*+8V2^?0dLoqix=35n1- zo1B=5eyH%oJubjS-H)~Bo=!!July1umWTZ;69x`|?4AFl{|AdkcY;eF{@mQviwJUs z@+0^=@nn-u>Z7Sgj+jf{Y?{%y?Fv`zoreq)o+MU<=owOU2emfDsfSoEz77rNX;+Y<~99Nt+Uzjtu|d*FYju zS4M|y-V|O$ZY8`8gq`@J_@$#J_%cI+6)AtYAuA46oTkfSk=`07%(q(GqjI3>owT&4 ze1EcbzEj#BOYWsdWzosAcTB{1KX_vT8zN^0YeWj%bm%&`hr%@#`=#zUwS`C{DQ8Hh zavIR5WKN0j%qf1#5!|Nlzk8$+S#;#Ji^Wt35=UJxXI{+h%=Ub%k{cNm7MMo4oTvm| z-SBtX*;#D||I-nSZ#oCc+9il)I39URAJDSA9kaV0-F{;s|*>JM%8j;Q*blP9D&0{qj# z;+)<5`5lbHM?*rBZ-!WX-b>I*gsx_VR%WypNaGgvp| z64{5>v3CRvXm5JD3xx%dcAB9f5(}Xh;T|+g_F}LWi5&%;J6JONOCOn-u5KC_eo9IR z@e9P6;j5QCYLZ6?vR0$?E6)o$MN4*eUH^>h8)i5@A!ere-k+&w_RAbomIdehFbn%Q z#h~qc*QgOdfByL}+qWLZw8{0%S5LP?YsqFR!5qLSgf?+gYBR+(?9;TaZF)Iva*zqW zfhG=jc(=Paa8avGKDnL)Rc!+EIC?69<8u{LplEiV))7o}RZ6Bk3 ztDL7#(^k5;T>Nk)$Jf#54S5h{#h$qep_<%9C!QffPZOS7QNEHUA`36FsY(suE=l{f28RL_wSE{e-Zo0 zcEH^J|NbF`{z(s=#sp62T`V5uy4ELyxg zKK*&Tz7LJzP*eOv*G`V~qEDi~rnrpSzKI4oOg~&W3B5CDs|_BUef8?x`$xyx1qwPo zB%?iZOL`pj-eWY`sD#kz_^&-!6{V!%5aTIR?m!x@Rbbx#@MmZw1q_O-T`!@El`M37 zmktw5g@krnJJTguj--VJ8!mA*-rWj*tnY>qt@fF7=N3bZ`fx@5kYbPQvqS6F6vbyj zdqA)1XY8{F&|KQyE_e|F_=xxh6!t z_DWy2%yW6yTN`8s4ymeyEig&>0b;*w-PQvWr9?$uTC zvQ^rtPT#gq4wI21U}0^p{FxF6KDqkvI~XlId`MYwHu1~t)KsR{1iYk>^#A7ANR0S+ z{klg=JKxvP(S|pwp;8=v4`LD3WrP$w9K6tQ(kCaE`re5VMh#>`iW?l?FzV{u7 zgx#!rI6~||3A1`T)-1bsoO3LX#G7IIJkYtG-~N){Yz+*7p%X5yoFi^0frm6262vFW z)Ei>l+sCOe8!U45_Dpv_rUVo&yFzSh=X)Y__r;Hgqp@n5a&T~OC=$y3z>R^t|CEQ2{a+ zjsjVDc0njca-b$6y_@dWy?gh` zBj+y83eO_@IJODm_$tm=DU(Zj#|`cMRw(E`d9*m^eN*D!8B zi**%hi3+{lYb`Xqavu-sJo3^?(Oz^uc{Vmy9x#+mpSg6%5e^S~8jf&ju}uld6znt} z&o#%_k!YO5nspjsxy(hU{7?UPhH1-?Pl@44FS(KG*6kT#dJ#l@!^}2DG;zb}XGu@N z2cbwg^Y3B|F7XT6VNfvfJZul7LJ#^mf|ixqU;(2Olk}3Dy_Y;4&DHIVc06I%s|^iT z5eBfa&(hP^O=W|z3H4HhvB2YR7Ln%6>=pOyuo#@?NO3gdjDz{{2lS5x$baz-Mm&hC z%(1^T3>gk!!Kvfp-o$PAfCXFqG{_&CHcP;O`&9WV2(bf9Xm|?!k<^r5J`dA_8)ZlUbpW 
zqPMs0=e+}z{ztI-9fs>ZDbI?>T(OYlAvnJIYn!f4x4QT{*%HO`kq2dBJNTNMQX00`I`S}?1ThYr{kVAq`%d&d z86Zx7<_bf)@7|4kpEec6kzNy81CJp&Ls*EA=}?s6td9jmUuqK&?2hUE0wR_R>4Ix- zpyR@TS%d8}#w#O(6N*BRcI5ILtZ+7A#$aFsrmmua%XBs8ih5uW*(F!U?bLIS9hM5o@nC;XZTkj(L1$?B)b z9`kJkYe38xR?7#}VF&Ryrq@zao9W0Yj}D=lq?q64u7X4FF(>b3(>o4qCJ8T-AL&Ad zy*~Zr+kLe4j1KM8Ov%2A&-QGTKsYok=Gs#iP@l?VnP@{qnBWs1*uP)U(s|2A{RDJa zFcget#z%QIREwWTWeq;l?t+rwiUN~256dd%;|N_nfr*93r%#zyNYRQrPOapI$Wx9$ z4k+~pxh0KJYBjv=DQ_WSxl>bxgh0vDrFFf=*s}O%>ht~n&uICCs4TEwE_%~I)g(xC zD4BjIl0~6hM-VlBmEdgC5~TEJPwv>dbscfWFa)IH(kQx2MAgajXfY2*)b4X9d^_X^ z?HbR=)X_Aqs8wFRe96M=CC_SA)s)pPbT2*_kEpAve!_b=OcqXG3aB1Op_4)%hVA;2 zB}-PHT70=A#Zq4`2_lp4LyA~SO@cnf;s?qkP7^Un<8m0jqb$ zPdhL$YQl`*Md{I8oDp5KSCEUM<#~ z-y%-t0(85#)Gb3A#OWw3`Vk=t?hP_ zC7nh8RC4aX(3NvJr?J-P+_B@#71N4zs|0?p=Hr+O6a3vfn==jK#wT0w*18wY-DYPd zu`w5S-nxA|Be24)BlU(llkDJ5sO3C^LENw=McQM`3sA@jr1^RCsg!M0?}pJ(^^U)n z^~d>+nLripg<^ObO{}7d62G_c9oIQKyJ5IeX>8+9SmC+t>CUXR2apkE?8YhM>lTT6dAv+DT1raY`eb7dx1}dt!LD3gO z(m?Bk%c!HLH!VztU0xQUA2fuozZSoUm=(P{Lpfu>6k{DkHp#AM`#Z$uz^~yg0ZRCN zq1qvVTy4zo>(5N4g>1mc(ICQJ%GcaT85+rWT9H7{3=Qkix9^$z{Ya*SJOrH|tBGam zci;d~_95sLnStp9=FO~9IM*Up7=hIxA-Qpjn{RuzZpt5*-{#fIgF zk4hETmfu-H!H*2d3n{qACKta;TzB-NzAk-b-|&L0#RsCS)CNp zOq7EAX5rycV_~~4WJCUjIYsA8Z*y5-+pOVY(Uf8WhT)+HoFak@#5H3;^0TbRiuE~W zdcFNX<j$+9#T<+uvSak-jRmIa1qdj*33V zKY5dS_H(LJM11dDuWKp1O0P>91N;2}0Sn&?3##x=P_r8nEr9}(w%821-G;w7;GsUOezjX3k#*g9%{j~7D-;u#w}sIpiAt`- zF&J4*Mae8U?Jc+M+!?TeQm2;UF1}d7JcNG>-wQ#d;_+!T>+}l_(u#o6PQOlJQ`Ouh zz5Y2$GChbrFzQ-KaCpA+n@s;7ZG%n#sgmKpfb_q}>SOD#Gp{4lqq)3$0~vl>!L}C< zwfd@n{~n{c=g^_rnTi57!N*(NTJadO{=YBMmGhMq8%cixKp6cualq%{W?)oPwDZNe0eEeItQ1^4QMs-*J_wziH>yfzMKl}eG z_s}^o@%XQ$!^@1(uKzqbGDenQiu_Oe_KV+-|Md=@8GZlrt)AO`Ce;)|)JXG$3m2#m zz$xTpN<`C4u3lm?zYEP{ie>rFK1C2-!!bmY+k6l+F2+i+7ohixT&@2?b}h#(tl zFm;p;$b0PD=Jot7Kk@&i2$8UwDRO>BG4??9B|JPzTOLE4q`aoOm@vw~?K*RpDD6xbwx*Cc0er;Q3KC#cp?Lwgs~c!XtNWU*1ck905k)t&1OY^9ksCtZT6wN+--rELIo_Ng?hNP=Mjp0^ z0-PD+abqe=`sjApf^c!5P51Mi*v*T$C2X;P2B2|oKUBw3H{4mj60AaH;Zc{WUFeud zwovj7amc(?dnx@lO*~#f*cXf%u})M;o7C8ldl;(RA?8Oe&@*BrI0$ zw9r!H1#};@I)W?uWXBUD*mGL z+!ikq3%#T@j*jg>&5_(MwX-ujW}tX)ll>2vhb|t8%Cvu00ge8=2_@|ttwv4jxztAj zaCxAKnAe^WOVzIs@jkxfQPZ+_KMrH)|Nd`V{@>q{_mA*&Ehkz0>+ipDkhdob7T0vp zpxzG(6>l3_Re!Yc_t`=dwCxL0?yDSpVG zq3x#l)(O3T{R|m*J&O7JXTOwczkf7l)_-Dj-H-pPrmOqW{nr1#qiR%5gMYkycE6ii S@I$ Date: Tue, 25 Nov 2025 05:02:21 +0800 Subject: [PATCH 08/43] [XPU]fix Kimi-VL-A3B-thinking on xpu (#29309) Signed-off-by: Yan Ma --- vllm/model_executor/models/moonvit.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/models/moonvit.py b/vllm/model_executor/models/moonvit.py index 2e3e6dc166ad8..63ea6b259a71d 100644 --- a/vllm/model_executor/models/moonvit.py +++ b/vllm/model_executor/models/moonvit.py @@ -56,10 +56,13 @@ from transformers.utils import is_flash_attn_2_available from vllm.model_executor.layers.conv import Conv2dLayer from vllm.model_executor.layers.linear import ReplicatedLinear from vllm.model_executor.models.utils import maybe_prefix +from vllm.platforms import current_platform from vllm.transformers_utils.configs.moonvit import MoonViTConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_varlen_func +elif current_platform.is_xpu(): + from vllm.attention.utils.fa_utils import flash_attn_varlen_func else: flash_attn_varlen_func = None @@ -106,10 +109,10 @@ def multihead_attention( q, k, v, - q_cu_seqlens, - k_cu_seqlens, - max_seqlen_q, - max_seqlen_k, + cu_seqlens_q=q_cu_seqlens, + cu_seqlens_k=k_cu_seqlens, + 
max_seqlen_q=max_seqlen_q, + max_seqlen_k=max_seqlen_k, causal=False, ) attn_out = attn_out.flatten(start_dim=-2) @@ -291,7 +294,12 @@ class Rope2DPosEmb(nn.Module): """ def __init__( - self, dim: int, max_height: int, max_width: int, theta_base=10000, device="cuda" + self, + dim: int, + max_height: int, + max_width: int, + theta_base=10000, + device=current_platform.device_type, ): super().__init__() self.dim = dim @@ -437,7 +445,7 @@ class MoonVitEncoderLayer(nn.Module): self.hidden_size_per_attention_head = self.hidden_dim // self.num_heads self.attn_implementation = attn_implementation # use fa2 in vllm by default - if is_flash_attn_2_available(): + if is_flash_attn_2_available() or current_platform.is_xpu(): self.attn_implementation = "flash_attention_2" self.norm0 = nn.LayerNorm(hidden_dim) From f32c7d6f5455de2684686c7238f9c7ecca6b58b7 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 24 Nov 2025 13:54:59 -0800 Subject: [PATCH 09/43] [Model Runner V2] Simplify Eagle bookkeeping with num_rejected (#29347) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu/input_batch.py | 19 ++++-------- vllm/v1/worker/gpu/model_runner.py | 30 ++++++++++++++----- vllm/v1/worker/gpu/spec_decode/eagle.py | 19 ++++++------ .../gpu/spec_decode/rejection_sample.py | 12 ++++++++ 4 files changed, 50 insertions(+), 30 deletions(-) diff --git a/vllm/v1/worker/gpu/input_batch.py b/vllm/v1/worker/gpu/input_batch.py index 3ac43ea4952de..2a7048ae3c0e0 100644 --- a/vllm/v1/worker/gpu/input_batch.py +++ b/vllm/v1/worker/gpu/input_batch.py @@ -344,8 +344,8 @@ def _post_update_kernel( sampled_tokens_ptr, sampled_tokens_stride, num_sampled_ptr, + num_rejected_ptr, query_start_loc_ptr, - cu_num_logits_ptr, ): req_id = tl.program_id(0) req_state_idx = tl.load(idx_mapping_ptr + req_id) @@ -360,17 +360,10 @@ def _post_update_kernel( query_start = tl.load(query_start_loc_ptr + req_id) query_end = tl.load(query_start_loc_ptr + req_id + 1) query_len = query_end - query_start + num_rejected = tl.load(num_rejected_ptr + req_id) num_computed = tl.load(num_computed_tokens_ptr + req_state_idx) - num_computed += query_len - # Consider the rejected tokens in spec decoding. - if num_sampled > 0: - # NOTE(woosuk): We must skip num_sampled == 0 to account for chunked prefills. 
- logits_start = tl.load(cu_num_logits_ptr + req_id) - logits_end = tl.load(cu_num_logits_ptr + req_id + 1) - num_logits = logits_end - logits_start - num_rejected = num_logits - num_sampled - num_computed -= num_rejected + num_computed += query_len - num_rejected tl.store(num_computed_tokens_ptr + req_state_idx, num_computed) @@ -385,10 +378,10 @@ def post_update( sampled_tokens: torch.Tensor, # [num_reqs] num_sampled: torch.Tensor, + # [num_reqs] + num_rejected: torch.Tensor, # [num_reqs + 1] query_start_loc: torch.Tensor, - # [num_reqs + 1] - cu_num_logits: torch.Tensor, ) -> None: num_reqs = idx_mapping.shape[0] _post_update_kernel[(num_reqs,)]( @@ -398,7 +391,7 @@ def post_update( sampled_tokens, sampled_tokens.stride(0), num_sampled, + num_rejected, query_start_loc, - cu_num_logits, num_warps=1, ) diff --git a/vllm/v1/worker/gpu/model_runner.py b/vllm/v1/worker/gpu/model_runner.py index e0ed183d3c5b0..e34a45f979807 100644 --- a/vllm/v1/worker/gpu/model_runner.py +++ b/vllm/v1/worker/gpu/model_runner.py @@ -46,7 +46,10 @@ from vllm.v1.worker.gpu.input_batch import ( ) from vllm.v1.worker.gpu.sampler import Sampler, compute_prompt_logprobs from vllm.v1.worker.gpu.spec_decode import init_speculator -from vllm.v1.worker.gpu.spec_decode.rejection_sample import rejection_sample +from vllm.v1.worker.gpu.spec_decode.rejection_sample import ( + get_num_rejected, + rejection_sample, +) from vllm.v1.worker.gpu.states import RequestState, SamplingMetadata from vllm.v1.worker.gpu.structured_outputs import apply_grammar_bitmask from vllm.v1.worker.kv_connector_model_runner_mixin import KVConnectorModelRunnerMixin @@ -311,12 +314,14 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): device=self.device, ) num_sampled = torch.ones(num_reqs, dtype=torch.int32, device=self.device) + num_rejected = torch.zeros(num_reqs, dtype=torch.int32, device=self.device) self.propose_draft( input_batch=input_batch, sampling_metadata=sampling_metadata, last_hidden_states=hidden_states, aux_hidden_states=aux_hidden_states, num_sampled=num_sampled, + num_rejected=num_rejected, ) @torch.inference_mode() @@ -606,7 +611,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): input_batch: InputBatch, sampling_metadata: SamplingMetadata, grammar_output: GrammarOutput | None, - ) -> tuple[SamplerOutput, torch.Tensor]: + ) -> tuple[SamplerOutput, torch.Tensor, torch.Tensor]: sample_hidden_states = hidden_states[input_batch.logits_indices] logits = self.model.compute_logits(sample_hidden_states) if grammar_output is not None: @@ -632,6 +637,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): # No draft tokens (common case). # 0 if chunked-prefilling, 1 if not. num_sampled = (~is_chunked_prefilling).int() + num_rejected = torch.zeros_like(num_sampled) else: # Draft tokens for spec decoding. input_ids = input_batch.input_ids[input_batch.logits_indices] @@ -642,9 +648,13 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): self.num_speculative_steps, ) num_sampled *= ~is_chunked_prefilling + num_rejected = get_num_rejected( + input_batch.cu_num_logits, + num_sampled, + ) sampler_output.sampled_token_ids = sampled_tokens # TODO(woosuk): Support logprobs with spec decoding. 
- return sampler_output, num_sampled + return sampler_output, num_sampled, num_rejected def compute_prompt_logprobs( self, @@ -750,6 +760,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): input_batch: InputBatch, sampled_tokens: torch.Tensor, num_sampled: torch.Tensor, + num_rejected: torch.Tensor, ) -> None: # Update the number of computed tokens. post_update( @@ -758,8 +769,8 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): self.req_states.last_sampled_tokens, sampled_tokens, num_sampled, + num_rejected, input_batch.query_start_loc, - input_batch.cu_num_logits, ) # Update the number of computed prefill tokens. @@ -779,6 +790,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): last_hidden_states: torch.Tensor, aux_hidden_states: list[torch.Tensor] | None, num_sampled: torch.Tensor, + num_rejected: torch.Tensor, ) -> torch.Tensor: num_reqs = input_batch.num_reqs idx_mapping_np = input_batch.idx_mapping_np @@ -800,6 +812,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): last_hidden_states, aux_hidden_states, num_sampled, + num_rejected, self.req_states.last_sampled_tokens, next_prefill_tokens, ) @@ -958,7 +971,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): self.execute_model_state = None # type: ignore assert sampling_metadata is not None - sampler_output, num_sampled_tokens = self.sample( + sampler_output, num_sampled, num_rejected = self.sample( hidden_states, input_batch, sampling_metadata, grammar_output ) prompt_logprobs_dict = self.compute_prompt_logprobs(hidden_states, input_batch) @@ -979,7 +992,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): async_output = AsyncOutput( model_runner_output=model_runner_output, sampler_output=sampler_output, - num_sampled_tokens=num_sampled_tokens, + num_sampled_tokens=num_sampled, copy_stream=self.output_copy_stream, copy_event=self.output_copy_event, ) @@ -990,7 +1003,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): # This sequencing may slightly reduce latency as async D2H copy does not # need to wait for the postprocess to finish. 
self.postprocess( - input_batch, sampler_output.sampled_token_ids, num_sampled_tokens + input_batch, sampler_output.sampled_token_ids, num_sampled, num_rejected ) if self.do_spec_decode: _ = self.propose_draft( @@ -998,7 +1011,8 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin): sampling_metadata, hidden_states, None, # aux_hidden_states - num_sampled_tokens, + num_sampled, + num_rejected, ) if self.use_async_scheduling: diff --git a/vllm/v1/worker/gpu/spec_decode/eagle.py b/vllm/v1/worker/gpu/spec_decode/eagle.py index 59d0f313d96a2..3c8621cc69c97 100644 --- a/vllm/v1/worker/gpu/spec_decode/eagle.py +++ b/vllm/v1/worker/gpu/spec_decode/eagle.py @@ -60,6 +60,8 @@ class EagleSpeculator: aux_hidden_states: list[torch.Tensor] | None, # [num_reqs] num_sampled: torch.Tensor, + # [num_reqs] + num_rejected: torch.Tensor, # [max_num_reqs, 1] last_sampled: torch.Tensor, # [num_reqs] @@ -84,6 +86,7 @@ class EagleSpeculator: self.input_ids, input_batch, num_sampled, + num_rejected, last_sampled, next_prefill_tokens, ) @@ -139,8 +142,8 @@ def _prepare_eagle_inputs_kernel( last_sampled_ptr, next_prefill_tokens_ptr, num_sampled_ptr, + num_rejected_ptr, query_start_loc_ptr, - cu_num_logits_ptr, BLOCK_SIZE: tl.constexpr, ): batch_idx = tl.program_id(0) @@ -149,17 +152,13 @@ def _prepare_eagle_inputs_kernel( query_len = query_end - query_start # Get the true query length and next token after accounting for rejected tokens. + num_rejected = tl.load(num_rejected_ptr + batch_idx) + query_len -= num_rejected + num_sampled = tl.load(num_sampled_ptr + batch_idx) if num_sampled > 0: req_state_idx = tl.load(idx_mapping_ptr + batch_idx) next_token = tl.load(last_sampled_ptr + req_state_idx).to(tl.int32) - - logits_start = tl.load(cu_num_logits_ptr + batch_idx) - logits_end = tl.load(cu_num_logits_ptr + batch_idx + 1) - num_logits = logits_end - logits_start - - num_rejected = num_logits - num_sampled - query_len -= num_rejected else: # Chunked prefilling. # Get the next prefill token. @@ -182,6 +181,8 @@ def prepare_eagle_inputs( input_batch: InputBatch, # [num_reqs] num_sampled: torch.Tensor, + # [num_reqs] + num_rejected: torch.Tensor, # [max_num_reqs, 1] last_sampled: torch.Tensor, # [max_num_reqs] @@ -201,8 +202,8 @@ def prepare_eagle_inputs( last_sampled, next_prefill_tokens, num_sampled, + num_rejected, input_batch.query_start_loc, - input_batch.cu_num_logits, BLOCK_SIZE=1024, ) return last_token_indices diff --git a/vllm/v1/worker/gpu/spec_decode/rejection_sample.py b/vllm/v1/worker/gpu/spec_decode/rejection_sample.py index 8a7bf28bacbd4..43c6ac518bccc 100644 --- a/vllm/v1/worker/gpu/spec_decode/rejection_sample.py +++ b/vllm/v1/worker/gpu/spec_decode/rejection_sample.py @@ -69,3 +69,15 @@ def rejection_sample( num_warps=1, ) return sampled, num_sampled + + +@torch.compile(dynamic=True) +def get_num_rejected( + cu_num_logits: torch.Tensor, + num_sampled: torch.Tensor, +) -> torch.Tensor: + num_logits = cu_num_logits[1:] - cu_num_logits[:-1] + num_rejected = num_logits - num_sampled + # No token is rejected for chunked prefills. 
+ num_rejected *= num_sampled > 0 + return num_rejected From 84371daf75507c849a38a9a44b2fb2af89e96dd3 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Mon, 24 Nov 2025 14:04:31 -0800 Subject: [PATCH 10/43] [Tests] Verify gpt_oss package is installed in harmony tests (#29336) Signed-off-by: Nick Hill --- tests/entrypoints/openai/test_response_api_with_harmony.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/entrypoints/openai/test_response_api_with_harmony.py b/tests/entrypoints/openai/test_response_api_with_harmony.py index 6251e1776c30a..8fd3545eccffa 100644 --- a/tests/entrypoints/openai/test_response_api_with_harmony.py +++ b/tests/entrypoints/openai/test_response_api_with_harmony.py @@ -1,6 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project - +import importlib import json import time @@ -35,6 +35,10 @@ GET_WEATHER_SCHEMA = { @pytest.fixture(scope="module") def server(): + assert importlib.util.find_spec("gpt_oss") is not None, ( + "Harmony tests require gpt_oss package to be installed" + ) + args = ["--enforce-eager", "--tool-server", "demo", "--max_model_len", "5000"] env_dict = dict( VLLM_ENABLE_RESPONSES_API_STORE="1", From 4dd42db566097cc2cacb2dddff3a8f3b0c007be0 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Mon, 24 Nov 2025 17:16:05 -0500 Subject: [PATCH 11/43] Remove VLLM_SKIP_WARMUP tip (#29331) Signed-off-by: Tyler Michael Smith --- docs/features/quantization/inc.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/features/quantization/inc.md b/docs/features/quantization/inc.md index 5e86e9388f328..9875bc44c9144 100644 --- a/docs/features/quantization/inc.md +++ b/docs/features/quantization/inc.md @@ -22,9 +22,6 @@ export QUANT_CONFIG=/path/to/quant/config/inc/meta-llama-3.1-405b-instruct/maxab vllm serve meta-llama/Llama-3.1-405B-Instruct --quantization inc --kv-cache-dtype fp8_inc --tensor_paralel_size 8 ``` -!!! tip - If you are just prototyping or testing your model with FP8, you can use the `VLLM_SKIP_WARMUP=true` environment variable to disable the warmup stage, which can take a long time. However, we do not recommend disabling this feature in production environments as it causes a significant performance drop. - !!! tip When using FP8 models, you may experience timeouts caused by the long compilation time of FP8 operations. To mitigate this problem, you can use the below environment variables: `VLLM_ENGINE_ITERATION_TIMEOUT_S` - to adjust the vLLM server timeout. You can set the value in seconds, e.g., 600 equals 10 minutes. From 71df2a57effc15b5f67cdbf55f3d1e1b71f90e86 Mon Sep 17 00:00:00 2001 From: Chen Zhang Date: Mon, 24 Nov 2025 14:28:32 -0800 Subject: [PATCH 12/43] [Hybrid Allocator] Better layer padding strategy for gpt-oss eagle (#29303) Signed-off-by: Chen Zhang --- tests/v1/core/test_kv_cache_utils.py | 59 ++++++++++++++++++++++++++++ vllm/v1/core/kv_cache_utils.py | 11 +++++- 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/tests/v1/core/test_kv_cache_utils.py b/tests/v1/core/test_kv_cache_utils.py index 24611a4aaa1b8..12ed59b6e863b 100644 --- a/tests/v1/core/test_kv_cache_utils.py +++ b/tests/v1/core/test_kv_cache_utils.py @@ -1436,6 +1436,65 @@ def test_get_kv_cache_config_one_worker(): ], ) + # 6 full + 5 sliding, pad to 6 full + 6 sliding. 
This is a typical case for gpt-oss + # eagle where there is only one more full attention layer than sliding window layers + kv_cache_specs_hybrid = { + "layer_1": new_kv_cache_spec(), + "layer_2": new_kv_cache_spec(), + "layer_3": new_kv_cache_spec(), + "layer_4": new_kv_cache_spec(), + "layer_5": new_kv_cache_spec(), + "layer_6": new_kv_cache_spec(), + "layer_7": new_sliding_window_spec(), + "layer_8": new_sliding_window_spec(), + "layer_9": new_sliding_window_spec(), + "layer_10": new_sliding_window_spec(), + "layer_11": new_sliding_window_spec(), + } + + kv_cache_config_hybrid = get_kv_cache_configs( + vllm_config, [kv_cache_specs_hybrid], [mem_per_block_per_layer * 6 * 32] + )[0] + print(kv_cache_config_hybrid) + assert kv_cache_config_hybrid == KVCacheConfig( + num_blocks=32, + kv_cache_tensors=[ + KVCacheTensor( + size=mem_per_block_per_layer * 32, + shared_by=["layer_1", "layer_7"], + ), + KVCacheTensor( + size=mem_per_block_per_layer * 32, + shared_by=["layer_2", "layer_8"], + ), + KVCacheTensor( + size=mem_per_block_per_layer * 32, + shared_by=["layer_3", "layer_9"], + ), + KVCacheTensor( + size=mem_per_block_per_layer * 32, + shared_by=["layer_4", "layer_10"], + ), + KVCacheTensor( + size=mem_per_block_per_layer * 32, + shared_by=["layer_5", "layer_11"], + ), + KVCacheTensor( + size=mem_per_block_per_layer * 32, + shared_by=["layer_6"], + ), + ], + kv_cache_groups=[ + KVCacheGroupSpec( + ["layer_1", "layer_2", "layer_3", "layer_4", "layer_5", "layer_6"], + new_kv_cache_spec(), + ), + KVCacheGroupSpec( + ["layer_7", "layer_8", "layer_9", "layer_10", "layer_11"], + new_sliding_window_spec(), + ), + ], + ) # different hidden size kv_cache_specs_hybrid = { "layer_1": new_kv_cache_spec(head_size=128), diff --git a/vllm/v1/core/kv_cache_utils.py b/vllm/v1/core/kv_cache_utils.py index b18ba8e8b2c7b..a0033fa650baa 100644 --- a/vllm/v1/core/kv_cache_utils.py +++ b/vllm/v1/core/kv_cache_utils.py @@ -971,7 +971,16 @@ def _get_kv_cache_groups_uniform_page_size( # is the minimum number of layers among all attention types. Need a better # strategy if we want to support more complex patterns (e.g., 20 full + 30 # sw, where the group size should be 10). - group_size = min([len(layers) for layers in same_type_layers.values()]) + min_num_layers = min([len(layers) for layers in same_type_layers.values()]) + group_size = min_num_layers + max_num_layers = max([len(layers) for layers in same_type_layers.values()]) + if max_num_layers < min_num_layers * 1.25: + # If the number of layers is not much larger than the minimum number of layers, + # use the maximum number of layers as the group size to avoid too many padding + # layers. A typical example is gpt-oss-20b + eagle, with 12 sw + 13 full. We + # pad it to (13 sw, 13 full) instead of (12 sw, 24 full). 1.25 is just a + # magic number to avoid too many padding layers. 
+ group_size = max_num_layers grouped_layers = [] for layers in same_type_layers.values(): num_padding_layers = group_size - len(layers) % group_size From c17610e2baf5e40b3b0638b272bfe7e04e471bfe Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 24 Nov 2025 18:22:46 -0500 Subject: [PATCH 13/43] [Bugfix] Only use triton_kernels for MXFP4 on SM90 and SM100 (#29339) Signed-off-by: mgoin --- vllm/model_executor/layers/quantization/mxfp4.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/layers/quantization/mxfp4.py b/vllm/model_executor/layers/quantization/mxfp4.py index 255b5aad17853..198feb03be3e4 100644 --- a/vllm/model_executor/layers/quantization/mxfp4.py +++ b/vllm/model_executor/layers/quantization/mxfp4.py @@ -132,12 +132,15 @@ def get_mxfp4_backend(with_lora_support: bool) -> Mxfp4Backend: ) # If FlashInfer is not available, try either Marlin or Triton - if ( - envs.VLLM_MXFP4_USE_MARLIN - or current_platform.get_device_capability()[0] < 9 - or not has_triton_kernels() - or not is_torch_equal_or_newer("2.8.0") - ): + triton_kernels_supported = ( + has_triton_kernels() + and is_torch_equal_or_newer("2.8.0") + # NOTE: triton_kernels are only confirmed to work on SM90 and SM100 + # SM110 fails with this error: https://github.com/vllm-project/vllm/issues/29317 + # SM120 needs this fix: https://github.com/triton-lang/triton/pull/8498 + and (9, 0) <= current_platform.get_device_capability() < (11, 0) + ) + if envs.VLLM_MXFP4_USE_MARLIN or not triton_kernels_supported: logger.info_once("Using Marlin backend") return Mxfp4Backend.MARLIN else: From 699bca76c00b81ba6c7ead38fed01712f5f56aa1 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Mon, 24 Nov 2025 19:49:01 -0500 Subject: [PATCH 14/43] [UX] Raise error for attn backend of batch invariant (#29348) Signed-off-by: yewentao256 --- vllm/model_executor/layers/batch_invariant.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/vllm/model_executor/layers/batch_invariant.py b/vllm/model_executor/layers/batch_invariant.py index 8b33727f05fbc..be7f673e5618f 100644 --- a/vllm/model_executor/layers/batch_invariant.py +++ b/vllm/model_executor/layers/batch_invariant.py @@ -812,19 +812,19 @@ def override_envs_for_invariance(): # "TRITON_MLA", ] if curr_attn_backend not in supported_backends: - warning = ( - "Forcibly updating attention backend to" - f" {supported_backends[0]} for batch_invariant. " - f" Supported backends: {supported_backends}." + error = ( + "VLLM batch_invariant mode requires an attention backend in " + f"{supported_backends}, but got '{curr_attn_backend}'. " + "Please set the 'VLLM_ATTENTION_BACKEND' environment variable " + "to one of the supported backends before enabling batch_invariant." ) - logger.warning_once(warning) - os.environ["VLLM_ATTENTION_BACKEND"] = supported_backends[0] + raise RuntimeError(error) if os.environ["VLLM_ATTENTION_BACKEND"] != supported_backends[0]: warning = ( "You are using a decode-invariant form of batch invariance. " "This will not be invariant between prefill and decode." 
) - logger.warning_once(warning) + logger.warning_once(warning, scope="local") os.environ["VLLM_ALLREDUCE_USE_SYMM_MEM"] = "0" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" From 5f9679a43bf92fc0fc8610f0ba5cc9c857148ccf Mon Sep 17 00:00:00 2001 From: Hanjie Qiu <50634613+hjjq@users.noreply.github.com> Date: Mon, 24 Nov 2025 20:13:12 -0500 Subject: [PATCH 15/43] [Spec Decode] Add support for EAGLE3 heads that do not use_aux_hidden_states (#27688) Signed-off-by: hjjq Signed-off-by: Benjamin Chislett Co-authored-by: Benjamin Chislett --- vllm/model_executor/models/llama_eagle3.py | 38 ++++++++++++++-------- vllm/v1/spec_decode/eagle.py | 19 +++++++++++ vllm/v1/worker/gpu_model_runner.py | 4 ++- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/vllm/model_executor/models/llama_eagle3.py b/vllm/model_executor/models/llama_eagle3.py index 3eaf2d80082f1..7a57644db1b13 100644 --- a/vllm/model_executor/models/llama_eagle3.py +++ b/vllm/model_executor/models/llama_eagle3.py @@ -142,6 +142,12 @@ class LlamaModel(nn.Module): # Get drafter's quantization config self.quant_config = get_draft_quant_config(vllm_config) + eagle_config = getattr(self.config, "eagle_config", None) + if eagle_config is not None and "use_aux_hidden_state" in eagle_config: + self.use_aux_hidden_state = eagle_config["use_aux_hidden_state"] + else: + self.use_aux_hidden_state = True + current_vllm_config = get_current_vllm_config() self.embed_tokens = VocabParallelEmbedding( @@ -161,20 +167,20 @@ class LlamaModel(nn.Module): for layer_idx in range(self.config.num_hidden_layers) ] ) - if hasattr(self.config, "target_hidden_size"): - fc_input_size = self.config.target_hidden_size * 3 - else: - fc_input_size = self.config.hidden_size * 3 - self.fc = ReplicatedLinear( - input_size=fc_input_size, - output_size=self.config.hidden_size, - bias=False, - params_dtype=vllm_config.model_config.dtype, - quant_config=self.quant_config, - prefix=maybe_prefix(prefix, "fc"), - return_bias=False, - ) - + if self.use_aux_hidden_state: + if hasattr(self.config, "target_hidden_size"): + fc_input_size = self.config.target_hidden_size * 3 + else: + fc_input_size = self.config.hidden_size * 3 + self.fc = ReplicatedLinear( + input_size=fc_input_size, + output_size=self.config.hidden_size, + bias=False, + params_dtype=vllm_config.model_config.dtype, + quant_config=self.quant_config, + prefix=maybe_prefix(prefix, "fc"), + return_bias=False, + ) self.norm = RMSNorm( self.config.hidden_size, eps=self.config.rms_norm_eps, @@ -332,6 +338,8 @@ class Eagle3LlamaForCausalLM(LlamaForCausalLM): self, hidden_states: torch.Tensor, ) -> torch.Tensor: + if not self.model.use_aux_hidden_state: + return hidden_states # combine multiple auxiliary hidden states returned by eagle3 return self.model.fc(hidden_states) @@ -357,6 +365,8 @@ class Eagle3LlamaForCausalLM(LlamaForCausalLM): skip_substrs.append("draft_id_to_target_id") if not includes_embed_tokens: skip_substrs.append("embed_tokens") + if not self.model.use_aux_hidden_state: + skip_substrs.append("fc.") loader = AutoWeightsLoader( self, skip_prefixes=None, diff --git a/vllm/v1/spec_decode/eagle.py b/vllm/v1/spec_decode/eagle.py index 3de418f1d13c8..afa16573eea10 100644 --- a/vllm/v1/spec_decode/eagle.py +++ b/vllm/v1/spec_decode/eagle.py @@ -83,6 +83,9 @@ class EagleProposer: self.draft_indexer_metadata_builder: AttentionMetadataBuilder | None = None self.attn_layer_names: list[str] = [] self.indexer_layer_names: list[str] = [] + self.eagle3_use_aux_hidden_state: bool = ( + 
self._get_eagle3_use_aux_hidden_state_from_config() + ) self.use_cuda_graph = False @@ -1169,6 +1172,22 @@ class EagleProposer: ) return builder + def _get_eagle3_use_aux_hidden_state_from_config(self) -> bool: + """ + Some eagle3 heads (e.g., nvidia/gpt-oss-120b-Eagle3-v2) do not use auxiliary + hidden states and directly uses the last layer output just like eagle1. + They might indicate this by setting "use_aux_hidden_state" to False + inside the "eagle_config" dict of their hf_config. + """ + if self.method != "eagle3": + return False + # Assume that eagle3 heads use aux hidden states by default + use_aux_hidden_state = True + eagle_config = getattr(self.draft_model_config.hf_config, "eagle_config", None) + if eagle_config is not None: + use_aux_hidden_state = eagle_config.get("use_aux_hidden_state", True) + return use_aux_hidden_state + def validate_same_kv_cache_group(self, kv_cache_config: KVCacheConfig) -> None: """ Validate that all eagle layers belong to the same KVCacheGroup. diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index cbafc9c993cc2..6a83ac14e0b3f 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -375,7 +375,9 @@ class GPUModelRunner( elif self.speculative_config.use_eagle(): self.drafter = EagleProposer(self.vllm_config, self.device, self) if self.speculative_config.method == "eagle3": - self.use_aux_hidden_state_outputs = True + self.use_aux_hidden_state_outputs = ( + self.drafter.eagle3_use_aux_hidden_state + ) elif self.speculative_config.method == "medusa": self.drafter = MedusaProposer( vllm_config=self.vllm_config, device=self.device From b8328b49fb9954575bd1d7b30b22bb626ee47624 Mon Sep 17 00:00:00 2001 From: Kunshang Ji Date: Tue, 25 Nov 2025 09:34:47 +0800 Subject: [PATCH 16/43] [XPU] upgrade torch & ipex 2.9 on XPU platform (#29307) Signed-off-by: Kunshang Ji --- docker/Dockerfile.xpu | 13 +++++++++---- requirements/xpu.txt | 6 +++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/docker/Dockerfile.xpu b/docker/Dockerfile.xpu index 5d5b82c4fa5af..adac43c6accbe 100644 --- a/docker/Dockerfile.xpu +++ b/docker/Dockerfile.xpu @@ -1,4 +1,4 @@ -FROM intel/deep-learning-essentials:2025.1.3-0-devel-ubuntu24.04 AS vllm-base +FROM intel/deep-learning-essentials:2025.2.2-0-devel-ubuntu24.04 AS vllm-base RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \ echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list && \ @@ -25,10 +25,14 @@ RUN apt clean && apt-get update -y && \ RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1 RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 -RUN apt install -y libze1 libze-dev libze-intel-gpu1 intel-opencl-icd libze-intel-gpu-raytracing +RUN apt install -y libze1 libze-dev libze-intel-gpu1 intel-opencl-icd libze-intel-gpu-raytracing intel-ocloc + +# This oneccl contains the BMG support which is not the case for default version of oneapi 2025.2. 
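+# (Illustrative note, assumptions flagged: on a host that actually exposes an
+# XPU device, one could sanity-check the upgraded torch 2.9 XPU stack with
+#   python3 -c "import torch; print(torch.__version__, torch.xpu.is_available())"
+# torch.xpu.is_available() would normally return False inside a plain docker
+# build, so this is a runtime check rather than a build-time assert.)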
+RUN wget https://github.com/uxlfoundation/oneCCL/releases/download/2021.15.6/intel-oneccl-2021.15.6.9_offline.sh +RUN bash intel-oneccl-2021.15.6.9_offline.sh -a --silent --eula accept && \ + echo "source /opt/intel/oneapi/setvars.sh --force" >> /root/.bashrc && \ + echo "source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force" >> /root/.bashrc -RUN wget https://github.com/uxlfoundation/oneCCL/releases/download/2021.15.4/intel-oneccl-2021.15.4.11_offline.sh -RUN bash intel-oneccl-2021.15.4.11_offline.sh -a --silent --eula accept && echo "source /opt/intel/oneapi/setvars.sh --force" >> /root/.bashrc SHELL ["bash", "-c"] CMD ["bash", "-c", "source /root/.bashrc && exec bash"] @@ -72,6 +76,7 @@ RUN python3 -m pip install -e tests/vllm_test_utils ENV NIXL_VERSION=0.7.0 RUN python3 /workspace/vllm/tools/install_nixl_from_source_ubuntu.py +# remove torch bundled oneccl to avoid conflicts RUN --mount=type=cache,target=/root/.cache/pip \ pip uninstall oneccl oneccl-devel -y diff --git a/requirements/xpu.txt b/requirements/xpu.txt index 59ea710684a2c..c1dc4195b5231 100644 --- a/requirements/xpu.txt +++ b/requirements/xpu.txt @@ -10,9 +10,9 @@ wheel jinja2>=3.1.6 datasets # for benchmark scripts numba == 0.61.2 # Required for N-gram speculative decoding -torch==2.8.0+xpu +--extra-index-url=https://download.pytorch.org/whl/xpu +torch==2.9.0+xpu torchaudio torchvision ---extra-index-url=https://download.pytorch.org/whl/xpu -intel-extension-for-pytorch @ https://intel-extension-for-pytorch.s3.us-east-1.amazonaws.com/ipex_dev/xpu/intel_extension_for_pytorch-2.8.10.post1%2Bxpu-cp312-cp312-linux_x86_64.whl +intel-extension-for-pytorch @ https://intel-extension-for-pytorch.s3.us-east-1.amazonaws.com/ipex_dev/xpu/intel_extension_for_pytorch-2.9.10.post0%2Bxpu-cp312-cp312-linux_x86_64.whl From a178a0b40b50bf448ab50a853b7eb1744af18f31 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Mon, 24 Nov 2025 17:54:26 -0800 Subject: [PATCH 17/43] [BugFix] Fix duplicate id tool-call race condition (#29355) Signed-off-by: Nick Hill --- vllm/entrypoints/openai/serving_chat.py | 13 +++++++++---- vllm/entrypoints/openai/serving_engine.py | 10 +++++++--- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 6cc685acd6728..2a870dbc3afac 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -273,6 +273,11 @@ class OpenAIServingChat(OpenAIServing): try: for i, engine_prompt in enumerate(engine_prompts): prompt_text, _, _ = self._get_prompt_components(request_prompts[i]) + # If we are creating sub requests for multiple prompts, ensure that they + # have unique request ids. 
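+                # For example, a request id "chatcmpl-abc" covering two
+                # prompts yields sub ids "chatcmpl-abc_0" and "chatcmpl-abc_1",
+                # while single-prompt requests keep the original id unchanged.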
+ sub_request_id = ( + request_id if len(engine_prompts) == 1 else f"{request_id}_{i}" + ) if self.default_sampling_params is None: self.default_sampling_params = {} @@ -301,7 +306,7 @@ class OpenAIServingChat(OpenAIServing): ) self._log_inputs( - request_id, + sub_request_id, request_prompts[i], params=sampling_params, lora_request=lora_request, @@ -316,14 +321,14 @@ class OpenAIServingChat(OpenAIServing): if isinstance(sampling_params, BeamSearchParams): generator = self.beam_search( prompt=engine_prompt, - request_id=request_id, + request_id=sub_request_id, params=sampling_params, lora_request=lora_request, trace_headers=trace_headers, ) else: engine_request, tokenization_kwargs = await self._process_inputs( - request_id, + sub_request_id, engine_prompt, sampling_params, lora_request=lora_request, @@ -334,7 +339,7 @@ class OpenAIServingChat(OpenAIServing): generator = self.engine_client.generate( engine_request, sampling_params, - request_id, + sub_request_id, lora_request=lora_request, trace_headers=trace_headers, priority=request.priority, diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 7dab5dbacd28c..de22c48809dc8 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -1242,16 +1242,19 @@ class OpenAIServing: ): prompt_text, _, _ = self._get_prompt_components(request_prompt) orig_priority = priority + sub_request = 0 while True: + # Ensure that each sub-request has a unique request id. + sub_request_id = f"{request_id}_{sub_request}" self._log_inputs( - request_id, + sub_request_id, request_prompt, params=sampling_params, lora_request=lora_request, ) trace_headers = kwargs.get("trace_headers") engine_request, tokenization_kwargs = await self._process_inputs( - request_id, + sub_request_id, engine_prompt, sampling_params, lora_request=lora_request, @@ -1262,7 +1265,7 @@ class OpenAIServing: generator = self.engine_client.generate( engine_request, sampling_params, - request_id, + sub_request_id, lora_request=lora_request, priority=priority, prompt_text=prompt_text, @@ -1295,6 +1298,7 @@ class OpenAIServing: sampling_params.max_tokens = self.max_model_len - len(prompt_token_ids) # OPTIMIZATION priority = orig_priority - 1 + sub_request += 1 def _get_prompt_components( self, From a4ad43ad5a819aabc7d9b48b46a7f11e2552befc Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 25 Nov 2025 01:58:58 +0000 Subject: [PATCH 18/43] Scheduled removal of `ParallelConfig`'s direct child EPLB fields (#29324) Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/config/parallel.py | 50 ---------------------------------------- vllm/engine/arg_utils.py | 24 ------------------- 2 files changed, 74 deletions(-) diff --git a/vllm/config/parallel.py b/vllm/config/parallel.py index ad438a8b464e0..913e97250d3d3 100644 --- a/vllm/config/parallel.py +++ b/vllm/config/parallel.py @@ -141,22 +141,6 @@ class ParallelConfig: - "deepep_high_throughput": Use deepep high-throughput kernels - "deepep_low_latency": Use deepep low-latency kernels - "flashinfer_all2allv": Use flashinfer alltoallv kernels for mnnvl""" - num_redundant_experts: int | None = None - """`num_redundant_experts` is deprecated and has been replaced with - `eplb_config.num_redundant_experts`. This will be removed in v0.12.0. 
- Please use `eplb_config.num_redundant_experts` instead.""" - eplb_window_size: int | None = None - """`eplb_window_size` is deprecated and has been replaced with - `eplb_config.window_size`. This will be removed in v0.12.0. - Please use `eplb_config.window_size` instead.""" - eplb_step_interval: int | None = None - """`eplb_step_interval` is deprecated and has been replaced with - `eplb_config.step_interval`. This will be removed in v0.12.0. - Please use `eplb_config.step_interval` instead.""" - eplb_log_balancedness: bool | None = None - """`eplb_log_balancedness` is deprecated and has been replaced with - `eplb_config.log_balancedness`. This will be removed in v0.12.0. - Please use `eplb_config.log_balancedness` instead.""" max_parallel_loading_workers: int | None = None """Maximum number of parallel loading workers when loading model @@ -516,40 +500,6 @@ class ParallelConfig: "--all2all-backend command-line argument instead." ) - # Forward deprecated fields to their new location - if self.num_redundant_experts is not None: - self.eplb_config.num_redundant_experts = self.num_redundant_experts - logger.warning_once( - "num_redundant_experts is deprecated and has been replaced " - "with eplb_config.num_redundant_experts. This will be removed " - "in v0.12.0. Changing this field after initialization will " - "have no effect." - ) - if self.eplb_window_size is not None: - self.eplb_config.window_size = self.eplb_window_size - logger.warning_once( - "eplb_window_size is deprecated and has been replaced " - "with eplb_config.window_size. This will be removed " - "in v0.12.0. Changing this field after initialization will " - "have no effect." - ) - if self.eplb_step_interval is not None: - self.eplb_config.step_interval = self.eplb_step_interval - logger.warning_once( - "eplb_step_interval is deprecated and has been replaced " - "with eplb_config.step_interval. This will be removed " - "in v0.12.0. Changing this field after initialization will " - "have no effect." - ) - if self.eplb_log_balancedness is not None: - self.eplb_config.log_balancedness = self.eplb_log_balancedness - logger.warning_once( - "eplb_log_balancedness is deprecated and has been replaced " - "with eplb_config.log_balancedness. This will be removed " - "in v0.12.0. Changing this field after initialization will " - "have no effect." 
- ) - # Continue with the rest of the initialization self.world_size = ( self.pipeline_parallel_size diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index b7c8f56e18c52..a7c6b11ccd5a8 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -855,30 +855,6 @@ class EngineArgs: "--expert-placement-strategy", **parallel_kwargs["expert_placement_strategy"], ) - parallel_group.add_argument( - "--num-redundant-experts", - type=int, - help="[DEPRECATED] --num-redundant-experts will be removed in v0.12.0.", - deprecated=True, - ) - parallel_group.add_argument( - "--eplb-window-size", - type=int, - help="[DEPRECATED] --eplb-window-size will be removed in v0.12.0.", - deprecated=True, - ) - parallel_group.add_argument( - "--eplb-step-interval", - type=int, - help="[DEPRECATED] --eplb-step-interval will be removed in v0.12.0.", - deprecated=True, - ) - parallel_group.add_argument( - "--eplb-log-balancedness", - action=argparse.BooleanOptionalAction, - help="[DEPRECATED] --eplb-log-balancedness will be removed in v0.12.0.", - deprecated=True, - ) parallel_group.add_argument( "--max-parallel-loading-workers", From 6f1355a1b74e4502e6a4e6ba9a811cc50729ee1f Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 24 Nov 2025 21:01:40 -0500 Subject: [PATCH 19/43] [Perf] Disable DeepGEMM MoE by default when TP=8 is used (#29346) Signed-off-by: mgoin --- .../model_executor/layers/quantization/fp8.py | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index 9e2718057038d..e033032903e87 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -28,6 +28,7 @@ from vllm.model_executor.layers.fused_moe import ( FusedMoeWeightScaleSupported, ) from vllm.model_executor.layers.fused_moe.config import ( + FusedMoEParallelConfig, FusedMoEQuantConfig, RoutingMethodType, fp8_w8a8_moe_quant_config, @@ -118,7 +119,9 @@ class Fp8MoeBackend(Enum): TRITON = 6 -def get_fp8_moe_backend(block_quant: bool) -> Fp8MoeBackend: +def get_fp8_moe_backend( + block_quant: bool, moe_parallel_config: FusedMoEParallelConfig +) -> Fp8MoeBackend: """ Select the primary FP8 MoE backend Note: Shape-specific fallbacks may still occur at runtime. @@ -159,8 +162,19 @@ def get_fp8_moe_backend(block_quant: bool) -> Fp8MoeBackend: logger.info_once("Using Marlin backend for FP8 MoE") return Fp8MoeBackend.MARLIN - # deepGEMM on supported platforms with block-quantized weights - if envs.VLLM_USE_DEEP_GEMM and envs.VLLM_MOE_USE_DEEP_GEMM and block_quant: + # Determine if we should use DeepGEMM with block-quantized weights: + # - If explicitly set by user, respect their choice + # - If not explicitly set (default), disable when TP size is >= 8 + moe_use_deep_gemm = envs.VLLM_MOE_USE_DEEP_GEMM + if not envs.is_set("VLLM_MOE_USE_DEEP_GEMM") and moe_parallel_config.tp_size >= 8: + moe_use_deep_gemm = False + logger.info_once( + "DeepGEMM MoE is disabled by default when TP size is >= 8. 
" + "Set VLLM_MOE_USE_DEEP_GEMM=1 to enable it.", + scope="local", + ) + + if envs.VLLM_USE_DEEP_GEMM and moe_use_deep_gemm and block_quant: if not has_deep_gemm(): logger.warning_once( "DeepGEMM backend requested but not available.", scope="local" @@ -641,7 +655,9 @@ class Fp8MoEMethod(FusedMoEMethodBase): self.quant_config = quant_config self.weight_block_size = self.quant_config.weight_block_size self.block_quant: bool = self.weight_block_size is not None - self.fp8_backend = get_fp8_moe_backend(self.block_quant) + self.fp8_backend = get_fp8_moe_backend( + self.block_quant, layer.moe_parallel_config + ) self.use_marlin = self.fp8_backend == Fp8MoeBackend.MARLIN self.flashinfer_moe_backend: FlashinferMoeBackend | None = None From 77e10c9cab751c83de0b2200977212922cc3776f Mon Sep 17 00:00:00 2001 From: Pleaplusone Date: Tue, 25 Nov 2025 10:05:46 +0800 Subject: [PATCH 20/43] [Perf][Deepseek] optimize gather_and_maybe_dequant_cache kernel's perf for extremely long sequence (#28029) Signed-off-by: ganyi --- csrc/cache.h | 11 +- csrc/cache_kernels.cu | 176 +++++++++++------------ csrc/torch_bindings.cpp | 3 +- tests/kernels/attention/test_cache.py | 12 +- vllm/_custom_ops.py | 6 +- vllm/v1/attention/backends/mla/common.py | 28 +++- 6 files changed, 131 insertions(+), 105 deletions(-) diff --git a/csrc/cache.h b/csrc/cache.h index b162a4a2bc31f..f2a5ec0acf5cd 100644 --- a/csrc/cache.h +++ b/csrc/cache.h @@ -41,11 +41,12 @@ void convert_fp8(torch::Tensor& dst_cache, torch::Tensor& src_cache, const double scale, const std::string& kv_cache_dtype); void gather_and_maybe_dequant_cache( - torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...] - torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...] - torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES] - torch::Tensor const& cu_seq_lens, // [BATCH+1] - int64_t batch_size, const std::string& kv_cache_dtype, + torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...] + torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...] + torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES] + torch::Tensor const& cu_seq_lens, // [BATCH+1] + torch::Tensor const& token_to_seq, // [MAX_TOKEN_ACROSS_CHUNKS] + int64_t num_tokens, const std::string& kv_cache_dtype, torch::Tensor const& scale, std::optional seq_starts = std::nullopt); diff --git a/csrc/cache_kernels.cu b/csrc/cache_kernels.cu index 32960cc8073bb..8a5457206c706 100644 --- a/csrc/cache_kernels.cu +++ b/csrc/cache_kernels.cu @@ -905,91 +905,79 @@ void convert_fp8(torch::Tensor& dst_cache, torch::Tensor& src_cache, namespace vllm { // grid is launched with dimensions (batch, num_splits) -template +template __global__ void gather_and_maybe_dequant_cache( - const cache_t* __restrict__ src_cache, // [NUM_BLOCKS, BLOCK_SIZE, - // ENTRIES...] - scalar_t* __restrict__ dst, // [TOT_TOKENS, ENTRIES...] - const int32_t* __restrict__ block_table, // [BATCH, BLOCK_INDICES] - const int32_t* __restrict__ cu_seq_lens, // [BATCH+1] - const int32_t block_size, const int32_t entry_size, + const cache_t* __restrict__ src_cache, // [NUM_BLOCKS, BLOCK_SIZE, + // ENTRIES...] + scalar_t* __restrict__ dst, // [TOT_TOKENS, ENTRIES...] 
+ const int32_t* __restrict__ block_table, // [BATCH, BLOCK_INDICES] + const int32_t* __restrict__ cu_seq_lens, // [BATCH+1] + const int32_t* __restrict__ token_to_seq, // [MAX_TOKEN_ACROSS_CHUNK] + const int32_t num_tokens, const int32_t block_size, const int64_t block_table_stride, const int64_t cache_block_stride, const int64_t cache_entry_stride, const int64_t dst_entry_stride, const float* __restrict__ scale, const int32_t* __restrict__ seq_starts) { // Optional: starting offsets per // batch + constexpr int vec_size = sizeof(float4) / sizeof(scalar_t); + using ltype = vllm::vec_n_t; + using stype = vllm::vec_n_t; + // We are adding this for code readability which will be optimized out when + // build in release. + assert(CTA_SIZE == blockDim.x); - const int64_t bid = blockIdx.x; // Batch ID - const int32_t num_splits = gridDim.y; - const int32_t split = blockIdx.y; - const int32_t seq_start = cu_seq_lens[bid]; - const int32_t seq_end = cu_seq_lens[bid + 1]; - const int32_t seq_len = seq_end - seq_start; - const int32_t tot_blocks = cuda_utils::ceil_div(seq_len, block_size); - const int32_t split_blocks = cuda_utils::ceil_div(tot_blocks, num_splits); +#pragma unroll + for (int token_id = blockIdx.x; token_id < num_tokens; + token_id += gridDim.x) { + int64_t batch_id = token_to_seq[token_id]; + int64_t batch_start = cu_seq_lens[batch_id]; + int64_t batch_end = cu_seq_lens[batch_id + 1]; + int32_t batch_offset = token_id - batch_start; - const int32_t split_start = split * split_blocks; - const int32_t split_end = min((split + 1) * split_blocks, tot_blocks); + if (token_id >= batch_end) return; + int32_t offset = 0; + if (seq_starts != nullptr) { + offset = seq_starts[batch_id]; + } + batch_offset += offset; + int32_t block_table_id = batch_offset / block_size; + int32_t slot_id = batch_offset % block_size; + int32_t block_table_offset = batch_id * block_table_stride + block_table_id; + int32_t block_id = block_table[block_table_offset]; + int64_t cache_offset = + block_id * cache_block_stride + slot_id * cache_entry_stride; + constexpr int32_t vec_iter_cnt = ENTRY_SIZE / vec_size; + scalar_t* dst_ = dst + token_id * dst_entry_stride; + cache_t* src_ = const_cast(src_cache) + cache_offset; - const bool is_active_split = (split_start < tot_blocks); - const bool is_last_split = (split_end == tot_blocks); - - if (!is_active_split) return; - - int32_t full_blocks_end = split_end; - int32_t partial_block_size = 0; - - // Adjust the pointer for the block_table for this batch. - // If seq_starts is provided, compute an offset based on (seq_starts[bid] / - // page_size) - const int32_t batch_offset = bid * block_table_stride; - int32_t offset = 0; - if (seq_starts != nullptr) { - offset = seq_starts[bid] / block_size; - } - const int32_t* batch_block_table = block_table + batch_offset + offset; - - // Adjust dst pointer based on the cumulative sequence lengths. 
- dst += seq_start * dst_entry_stride; - - if (is_last_split) { - partial_block_size = seq_len % block_size; - if (partial_block_size) full_blocks_end -= 1; - } - - auto copy_entry = [&](const cache_t* __restrict__ _src, - scalar_t* __restrict__ _dst) { - for (int i = threadIdx.x; i < entry_size; i += blockDim.x) { +#pragma unroll + for (int idx = threadIdx.x; idx < vec_iter_cnt; idx += CTA_SIZE) { if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) { - _dst[i] = static_cast(_src[i]); + reinterpret_cast(dst_)[idx] = + static_cast(reinterpret_cast(src_)[idx]); } else { - _dst[i] = - fp8::scaled_convert(_src[i], *scale); + ltype loaded_val = reinterpret_cast(src_)[idx]; + stype store_val; +#pragma unroll + for (int j = 0; j < vec_size; ++j) { + store_val.val[j] = fp8::scaled_convert( + loaded_val.val[j], *scale); + } + reinterpret_cast(dst_)[idx] = store_val; } } - }; - - const auto loop_end = - std::min((int64_t)full_blocks_end, block_table_stride - offset); - for (int pid = split_start; pid < loop_end; ++pid) { - auto block_id = batch_block_table[pid]; - auto block_start_ptr = src_cache + block_id * cache_block_stride; - auto block_dst_ptr = dst + pid * block_size * dst_entry_stride; - for (int eid = 0; eid < block_size; ++eid) { - copy_entry(block_start_ptr + eid * cache_entry_stride, - block_dst_ptr + eid * dst_entry_stride); - } - } - - if (partial_block_size) { - if (offset + full_blocks_end < block_table_stride) { - auto block_id = batch_block_table[full_blocks_end]; - auto block_start_ptr = src_cache + block_id * cache_block_stride; - auto block_dst_ptr = - dst + full_blocks_end * block_size * dst_entry_stride; - for (int eid = 0; eid < partial_block_size; ++eid) { - copy_entry(block_start_ptr + eid * cache_entry_stride, - block_dst_ptr + eid * dst_entry_stride); + // process tail + constexpr int32_t tail_cnt = ENTRY_SIZE % vec_size; + dst_ = dst_ + ENTRY_SIZE - tail_cnt; + src_ = src_ + ENTRY_SIZE - tail_cnt; +#pragma unroll + for (int idx = threadIdx.x; idx < tail_cnt; idx += CTA_SIZE) { + if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) { + dst_[idx] = static_cast(src_[idx]); + } else { + dst_[idx] = + fp8::scaled_convert(src_[idx], *scale); } } } @@ -1001,34 +989,38 @@ __global__ void gather_and_maybe_dequant_cache( // SCALAR_T is the data type of the destination tensor. // CACHE_T is the stored data type of kv-cache. // KV_DTYPE is the real data type of kv-cache. -#define CALL_GATHER_CACHE(SCALAR_T, CACHE_T, KV_DTYPE) \ - vllm::gather_and_maybe_dequant_cache \ - <<>>( \ - reinterpret_cast(src_cache.data_ptr()), \ - reinterpret_cast(dst.data_ptr()), \ - block_table.data_ptr(), cu_seq_lens.data_ptr(), \ - block_size, entry_size, block_table_stride, cache_block_stride, \ - cache_entry_stride, dst_entry_stride, \ - reinterpret_cast(scale.data_ptr()), seq_starts_ptr); +#define CALL_GATHER_CACHE(SCALAR_T, CACHE_T, KV_DTYPE) \ + vllm::gather_and_maybe_dequant_cache \ + <<>>( \ + reinterpret_cast(src_cache.data_ptr()), \ + reinterpret_cast(dst.data_ptr()), \ + block_table.data_ptr(), cu_seq_lens.data_ptr(), \ + token_to_seq.data_ptr(), num_tokens, block_size, \ + block_table_stride, cache_block_stride, cache_entry_stride, \ + dst_entry_stride, reinterpret_cast(scale.data_ptr()), \ + seq_starts_ptr); // Gather sequences from the cache into the destination tensor. 
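+// For example (values invented for exposition): two sequences of lengths 3
+// and 2 give cu_seq_lens = [0, 3, 5] and token_to_seq = [0, 0, 0, 1, 1], so
+// the thread block handling token_id 4 recovers batch_id = 1 with a single
+// lookup instead of scanning cu_seq_lens.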
// - cu_seq_lens contains the cumulative sequence lengths for each batch // - block_table contains the cache block indices for each sequence +// - token_to_seq contains the back mapping from token_id to batch_id // - Optionally, seq_starts (if provided) offsets the starting block index by // (seq_starts[bid] / page_size) void gather_and_maybe_dequant_cache( - torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...] - torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...] - torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES] - torch::Tensor const& cu_seq_lens, // [BATCH+1] - int64_t batch_size, const std::string& kv_cache_dtype, + torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...] + torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...] + torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES] + torch::Tensor const& cu_seq_lens, // [BATCH+1] + torch::Tensor const& token_to_seq, // [MAX_TOKEN_ACROSS_CHUNKS] + int64_t num_tokens, const std::string& kv_cache_dtype, torch::Tensor const& scale, std::optional seq_starts = std::nullopt) { at::cuda::OptionalCUDAGuard device_guard(src_cache.device()); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); int32_t block_size = src_cache.size(1); - int32_t entry_size = src_cache.flatten(2, -1).size(2); + int32_t head_dim = dst.size(-1); TORCH_CHECK(block_table.dtype() == torch::kInt32, "block_table must be int32"); @@ -1038,6 +1030,9 @@ void gather_and_maybe_dequant_cache( TORCH_CHECK(seq_starts.value().dtype() == torch::kInt32, "seq_starts must be int32"); } + TORCH_CHECK(head_dim == 576, + "gather_and_maybe_dequant_cache only support the head_dim to 576 " + "for better performance") TORCH_CHECK(src_cache.device() == dst.device(), "src_cache and dst must be on the same device"); @@ -1055,10 +1050,9 @@ void gather_and_maybe_dequant_cache( int64_t cache_entry_stride = src_cache.stride(1); int64_t dst_entry_stride = dst.stride(0); - // Decide on the number of splits based on the batch size. - int num_splits = batch_size > 128 ? 2 : batch_size > 64 ? 4 : 16; - dim3 grid(batch_size, num_splits); - dim3 block(1024); + constexpr int32_t thread_block_size = 64; + dim3 grid(num_tokens); + dim3 block(thread_block_size); const int32_t* seq_starts_ptr = seq_starts.has_value() ? seq_starts.value().data_ptr() : nullptr; diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index 5af74c2c2a6b0..14913bef13125 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -695,7 +695,8 @@ TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cache_ops), cache_ops) { cache_ops.def( "gather_and_maybe_dequant_cache(Tensor src_cache, Tensor! dst, " " Tensor block_table, Tensor cu_seq_lens, " - " int batch_size, " + " Tensor token_to_seq, " + " int num_tokens, " " str kv_cache_dtype, " " Tensor scale, Tensor? 
seq_starts) -> ()"); cache_ops.impl("gather_and_maybe_dequant_cache", torch::kCUDA, diff --git a/tests/kernels/attention/test_cache.py b/tests/kernels/attention/test_cache.py index 028e164cb801b..acf46d75d62eb 100644 --- a/tests/kernels/attention/test_cache.py +++ b/tests/kernels/attention/test_cache.py @@ -921,12 +921,16 @@ def test_gather_and_maybe_dequant_cache_mla( ) _fill_mla_cache(src_cache, kv_cache_dtype=kv_cache_dtype) - seq_len_tensor = torch.randint(0, max_seq_len + 1, (batch_size,), device=device) + seq_len_tensor = torch.randint( + max_seq_len, max_seq_len + 1, (batch_size,), device=device + ) total_tokens = seq_len_tensor.sum() cu_seq_lens = torch.empty((batch_size + 1), dtype=torch.int32, device=device) cu_seq_lens[0] = 0 cu_seq_lens[1:] = seq_len_tensor.cumsum(dim=0).to(dtype=torch.int32) + token_to_seq = torch.arange(0, batch_size, dtype=torch.int32, device=device) + token_to_seq = torch.repeat_interleave(token_to_seq, seq_len_tensor) print("seq_len_tensor", seq_len_tensor) tot_blocks_tensor = (seq_len_tensor + block_size - 1) // block_size @@ -977,7 +981,8 @@ def test_gather_and_maybe_dequant_cache_mla( dst, block_table, cu_seq_lens, - batch_size, + token_to_seq, + total_tokens, kv_cache_dtype, scale, None, @@ -990,7 +995,8 @@ def test_gather_and_maybe_dequant_cache_mla( dst, block_table, cu_seq_lens, - batch_size, + token_to_seq, + total_tokens, kv_cache_dtype, scale, None, diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 0f625a7945241..4a1bcc761f994 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -2201,7 +2201,8 @@ def gather_and_maybe_dequant_cache( dst: torch.Tensor, block_table: torch.Tensor, cu_seq_lens: torch.Tensor, - batch_size: int, + token_to_seq: torch.Tensor, + num_tokens: int, kv_cache_dtype: str, scale: torch.Tensor, seq_starts: torch.Tensor | None = None, @@ -2211,7 +2212,8 @@ def gather_and_maybe_dequant_cache( dst, block_table, cu_seq_lens, - batch_size, + token_to_seq, + num_tokens, kv_cache_dtype, scale, seq_starts, diff --git a/vllm/v1/attention/backends/mla/common.py b/vllm/v1/attention/backends/mla/common.py index 43aef8a7cca91..87a3aac21d2c3 100755 --- a/vllm/v1/attention/backends/mla/common.py +++ b/vllm/v1/attention/backends/mla/common.py @@ -340,6 +340,8 @@ class MLACommonPrefillMetadata: max_seq_lens: list[int] seq_lens: torch.Tensor workspace: torch.Tensor + token_to_seq: torch.Tensor + chunk_total_token: list[int] # for mla DCP padded_local_chunk_seq_lens: list[list[int]] | None = None @@ -839,6 +841,19 @@ class MLACommonMetadataBuilder(AttentionMetadataBuilder[M]): torch.cumsum( chunk_seq_lens, dim=1, out=cu_seq_lens_cpu[:, 1:], dtype=torch.int32 ) + chunk_total_token = cu_seq_lens_cpu[:, -1] + + max_token_num_over_chunk = chunk_total_token.max().item() + token_to_seq_tensor_cpu = torch.zeros( + [num_chunks, max_token_num_over_chunk], dtype=torch.int32 + ) + range_idx = torch.arange(num_prefills, dtype=torch.int32) + for i in range(num_chunks): + chunk_token_to_seq_tensor = torch.repeat_interleave( + range_idx, chunk_seq_lens[i] + ) + chunk_len = chunk_token_to_seq_tensor.shape[0] + token_to_seq_tensor_cpu[i, :chunk_len] = chunk_token_to_seq_tensor if self.dcp_world_size > 1: local_context_lens_allranks = get_dcp_local_seq_lens( @@ -906,6 +921,10 @@ class MLACommonMetadataBuilder(AttentionMetadataBuilder[M]): seq_tot=padded_local_chunk_seq_lens.sum(dim=1).tolist(), max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(), seq_lens=chunk_seq_lens, + token_to_seq=token_to_seq_tensor_cpu.to( + device, non_blocking=True + 
), + chunk_total_token=chunk_total_token.tolist(), workspace=self.chunked_prefill_workspace, padded_local_chunk_seq_lens=padded_local_chunk_seq_lens.tolist(), local_context_lens_allranks=local_context_lens_allranks.tolist(), @@ -922,6 +941,10 @@ class MLACommonMetadataBuilder(AttentionMetadataBuilder[M]): seq_tot=chunk_seq_lens.sum(dim=1).tolist(), max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(), seq_lens=chunk_seq_lens, + token_to_seq=token_to_seq_tensor_cpu.to( + device, non_blocking=True + ), + chunk_total_token=chunk_total_token, workspace=self.chunked_prefill_workspace, ) @@ -1638,16 +1661,15 @@ class MLACommonImpl(MLACommonBaseImpl[M], Generic[M]): output = None iters = len(prefill_metadata.chunked_context.seq_tot) workspace = prefill_metadata.chunked_context.workspace - for i in range(iters): toks = prefill_metadata.chunked_context.seq_tot[i] - ops.gather_and_maybe_dequant_cache( src_cache=kv_c_and_k_pe_cache, dst=workspace, block_table=prefill_metadata.block_table, cu_seq_lens=prefill_metadata.chunked_context.cu_seq_lens[i], - batch_size=attn_metadata.num_prefills, + token_to_seq=prefill_metadata.chunked_context.token_to_seq[i], + num_tokens=prefill_metadata.chunked_context.chunk_total_token[i], kv_cache_dtype=self.kv_cache_dtype, scale=k_scale, seq_starts=prefill_metadata.chunked_context.starts[i], From cb7214d8eaa231c67416282668f1ca274f8068ba Mon Sep 17 00:00:00 2001 From: gbyu-amd Date: Tue, 25 Nov 2025 10:15:02 +0800 Subject: [PATCH 21/43] [ROCm][MLA] enable fp8 MLA decode on ROCm (#28032) Signed-off-by: guanbao Signed-off-by: Guanbao Yu Signed-off-by: gbyu-amd Co-authored-by: guanbao --- vllm/_aiter_ops.py | 10 ++++++++++ vllm/v1/attention/backends/mla/rocm_aiter_mla.py | 12 +++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/vllm/_aiter_ops.py b/vllm/_aiter_ops.py index db79b3f5e8bcb..a8f472d147a0d 100644 --- a/vllm/_aiter_ops.py +++ b/vllm/_aiter_ops.py @@ -294,6 +294,8 @@ def _rocm_aiter_mla_decode_fwd_impl( kv_last_page_lens: torch.Tensor | None = None, sm_scale: float = 1.0, logit_cap: float = 0.0, + q_scale: torch.Tensor | None = None, + kv_scale: torch.Tensor | None = None, ) -> None: from aiter.mla import mla_decode_fwd @@ -308,6 +310,8 @@ def _rocm_aiter_mla_decode_fwd_impl( max_seqlen_qo, sm_scale=sm_scale, logit_cap=logit_cap, + q_scale=q_scale, + kv_scale=kv_scale, ) @@ -322,6 +326,8 @@ def _rocm_aiter_mla_decode_fwd_fake( kv_last_page_lens: torch.Tensor | None = None, sm_scale: float = 1.0, logit_cap: float = 0.0, + q_scale: torch.Tensor | None = None, + kv_scale: torch.Tensor | None = None, ) -> None: pass @@ -806,6 +812,8 @@ class rocm_aiter_ops: kv_indices: torch.Tensor | None = None, kv_last_page_lens: torch.Tensor | None = None, logit_cap: float = 0.0, + q_scale: torch.Tensor | None = None, + kv_scale: torch.Tensor | None = None, ): torch.ops.vllm.rocm_aiter_mla_decode_fwd( q, @@ -818,6 +826,8 @@ class rocm_aiter_ops: kv_last_page_lens, sm_scale=sm_scale, logit_cap=logit_cap, + q_scale=q_scale, + kv_scale=kv_scale, ) @staticmethod diff --git a/vllm/v1/attention/backends/mla/rocm_aiter_mla.py b/vllm/v1/attention/backends/mla/rocm_aiter_mla.py index 56f9c7a281e7f..00a0a77a1c2f7 100644 --- a/vllm/v1/attention/backends/mla/rocm_aiter_mla.py +++ b/vllm/v1/attention/backends/mla/rocm_aiter_mla.py @@ -49,6 +49,8 @@ class AiterMLADecodeMetadata(MLACommonDecodeMetadata): paged_kv_last_page_len: torch.Tensor | None = None # The query indptr, shape : [num_decode + 1] qo_indptr: torch.Tensor | None = None + # The dtype of MLA out tensor + 
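+    # (illustrative rationale, inferred from this patch rather than stated in
+    # it: with fp8 q/kv the query dtype no longer matches the desired output
+    # dtype, so the out buffer is allocated from model_config.dtype rather
+    # than q.dtype)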
attn_out_dtype: torch.dtype = torch.bfloat16 class AiterMLAMetadata(MLACommonMetadata[AiterMLADecodeMetadata]): @@ -74,6 +76,7 @@ class AiterMLAMetadataBuilder(MLACommonMetadataBuilder[AiterMLAMetadata]): ) self.compilation_config = vllm_config.compilation_config + self.decode_attn_out_dtype = vllm_config.model_config.dtype # kernel block size is always 1. max_num_pages_per_req = vllm_config.model_config.max_model_len max_num_reqs = vllm_config.scheduler_config.max_num_seqs @@ -162,6 +165,7 @@ class AiterMLAMetadataBuilder(MLACommonMetadataBuilder[AiterMLAMetadata]): paged_kv_last_page_len=paged_kv_last_page_len, qo_indptr=qo_indptr, dcp_tot_seq_lens=dcp_tot_seq_lens_device, + attn_out_dtype=self.decode_attn_out_dtype, ) return attn_metadata @@ -242,7 +246,11 @@ class AiterMLAImpl(MLACommonImpl[AiterMLAMetadata]): assert isinstance(q, torch.Tensor) B = q.shape[0] o = torch.zeros( - B, self.num_heads, self.kv_lora_rank, dtype=q.dtype, device=q.device + B, + self.num_heads, + self.kv_lora_rank, + dtype=attn_metadata.decode.attn_out_dtype, + device=q.device, ) kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2) @@ -260,6 +268,8 @@ class AiterMLAImpl(MLACommonImpl[AiterMLAMetadata]): attn_metadata.decode.paged_kv_indptr, attn_metadata.decode.paged_kv_indices, attn_metadata.decode.paged_kv_last_page_len, + q_scale=layer._q_scale, + kv_scale=layer._k_scale, ) return o, None From 22b42b5402f887c7d4b9f9aa4e82c970a6fd11a9 Mon Sep 17 00:00:00 2001 From: Divakar Verma <137818590+divakar-amd@users.noreply.github.com> Date: Mon, 24 Nov 2025 20:15:39 -0600 Subject: [PATCH 22/43] [CI][ROCm] Install arctic-inference on ROCm tests (#29344) Signed-off-by: Divakar Verma --- requirements/rocm-test.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/requirements/rocm-test.txt b/requirements/rocm-test.txt index 2d57e7e167869..f9bddc23420b4 100644 --- a/requirements/rocm-test.txt +++ b/requirements/rocm-test.txt @@ -45,3 +45,6 @@ multiprocess==0.70.16 # Plugins test terratorch @ git+https://github.com/IBM/terratorch.git@07184fcf91a1324f831ff521dd238d97fe350e3e + +# Required for suffix decoding test +arctic-inference == 0.1.1 \ No newline at end of file From 7012d8b45e677a4316e38be6fb9547de2993b519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Tue, 25 Nov 2025 10:54:00 +0800 Subject: [PATCH 23/43] [Docker] Optimize Dockerfile: consolidate apt-get and reduce image size by ~200MB (#29060) Signed-off-by: princepride --- docker/Dockerfile | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index e03b9989a190c..84a1802dbe03a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -85,7 +85,20 @@ ARG GET_PIP_URL RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \ && apt-get update -y \ - && apt-get install -y ccache software-properties-common git curl sudo python3-pip libibverbs-dev \ + && apt-get install -y --no-install-recommends \ + ccache \ + software-properties-common \ + git \ + curl \ + sudo \ + python3-pip \ + libibverbs-dev \ + # Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519 + # as it was causing spam when compiling the CUTLASS kernels + gcc-10 \ + g++-10 \ + && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10 \ + && rm -rf /var/lib/apt/lists/* \ && curl -LsSf https://astral.sh/uv/install.sh | sh 
\ && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \ && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \ @@ -110,10 +123,6 @@ ENV UV_INDEX_STRATEGY="unsafe-best-match" # Use copy mode to avoid hardlink failures with Docker cache mounts ENV UV_LINK_MODE=copy -# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519 -# as it was causing spam when compiling the CUTLASS kernels -RUN apt-get install -y gcc-10 g++-10 -RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10 RUN < Date: Tue, 25 Nov 2025 03:15:13 +0000 Subject: [PATCH 24/43] [Metrics] Scheduled removal of deprecated metrics (#29330) Signed-off-by: Mark McLoughlin --- tests/entrypoints/openai/test_metrics.py | 3 - vllm/v1/metrics/loggers.py | 134 +++++++---------------- 2 files changed, 37 insertions(+), 100 deletions(-) diff --git a/tests/entrypoints/openai/test_metrics.py b/tests/entrypoints/openai/test_metrics.py index 4e7b765d7713f..65a6fd20bd0d1 100644 --- a/tests/entrypoints/openai/test_metrics.py +++ b/tests/entrypoints/openai/test_metrics.py @@ -183,9 +183,6 @@ async def test_metrics_counts( EXPECTED_METRICS_V1 = [ "vllm:num_requests_running", "vllm:num_requests_waiting", - "vllm:gpu_cache_usage_perc", - "vllm:gpu_prefix_cache_queries", - "vllm:gpu_prefix_cache_hits", "vllm:kv_cache_usage_perc", "vllm:prefix_cache_queries", "vllm:prefix_cache_hits", diff --git a/vllm/v1/metrics/loggers.py b/vllm/v1/metrics/loggers.py index e2d82241ce210..bd18a152ffc08 100644 --- a/vllm/v1/metrics/loggers.py +++ b/vllm/v1/metrics/loggers.py @@ -440,57 +440,6 @@ class PrometheusStatLogger(AggregateStatLoggerBase): # Setting default values self.record_sleep_state() - # GPU cache - # - # Deprecated in 0.9.2 - Renamed as vllm:kv_cache_usage_perc - # With 0.11.x you can enable with --show-hidden-metrics-for-version=0.10 - # TODO: remove in 0.12.0 - if self.show_hidden_metrics: - gauge_gpu_cache_usage = self._gauge_cls( - name="vllm:gpu_cache_usage_perc", - documentation=( - "GPU KV-cache usage. 1 means 100 percent usage." - "DEPRECATED: Use vllm:kv_cache_usage_perc instead." - ), - multiprocess_mode="mostrecent", - labelnames=labelnames, - ) - self.gauge_gpu_cache_usage = make_per_engine( - gauge_gpu_cache_usage, engine_indexes, model_name - ) - - # Deprecated in 0.9.2 - Renamed as vllm:prefix_cache_queries - # With 0.11.x you can enable with --show-hidden-metrics-for-version=0.10 - # TODO: remove in 0.12.0 - if self.show_hidden_metrics: - counter_gpu_prefix_cache_queries = self._counter_cls( - name="vllm:gpu_prefix_cache_queries", - documentation=( - "GPU prefix cache queries, in terms of number of queried" - "tokens. DEPRECATED: Use vllm:prefix_cache_queries instead." - ), - labelnames=labelnames, - ) - self.counter_gpu_prefix_cache_queries = make_per_engine( - counter_gpu_prefix_cache_queries, engine_indexes, model_name - ) - - # Deprecated in 0.9.2 - Renamed as vllm:prefix_cache_hits - # With 0.11.x you can enable with --show-hidden-metrics-for-version=0.10 - # TODO: remove in 0.12.0 - if self.show_hidden_metrics: - counter_gpu_prefix_cache_hits = self._counter_cls( - name="vllm:gpu_prefix_cache_hits", - documentation=( - "GPU prefix cache hits, in terms of number of cached " - "tokens. DEPRECATED: Use vllm:prefix_cache_hits instead." 
- ), - labelnames=labelnames, - ) - self.counter_gpu_prefix_cache_hits = make_per_engine( - counter_gpu_prefix_cache_hits, engine_indexes, model_name - ) - gauge_kv_cache_usage = self._gauge_cls( name="vllm:kv_cache_usage_perc", documentation="KV-cache usage. 1 means 100 percent usage.", @@ -735,39 +684,41 @@ class PrometheusStatLogger(AggregateStatLoggerBase): ) # Deprecated in 0.11 - Renamed as vllm:inter_token_latency_seconds - # TODO: in 0.12, only enable if show_hidden_metrics=True - histogram_time_per_output_token = self._histogram_cls( - name="vllm:time_per_output_token_seconds", - documentation=( - "Histogram of time per output token in seconds." - "DEPRECATED: Use vllm:inter_token_latency_seconds instead." - ), - buckets=[ - 0.01, - 0.025, - 0.05, - 0.075, - 0.1, - 0.15, - 0.2, - 0.3, - 0.4, - 0.5, - 0.75, - 1.0, - 2.5, - 5.0, - 7.5, - 10.0, - 20.0, - 40.0, - 80.0, - ], - labelnames=labelnames, - ) - self.histogram_time_per_output_token = make_per_engine( - histogram_time_per_output_token, engine_indexes, model_name - ) + # With 0.12.x you can enable with --show-hidden-metrics-for-version=0.11 + # TODO: remove in 0.13.0 + if self.show_hidden_metrics: + histogram_time_per_output_token = self._histogram_cls( + name="vllm:time_per_output_token_seconds", + documentation=( + "Histogram of time per output token in seconds." + "DEPRECATED: Use vllm:inter_token_latency_seconds instead." + ), + buckets=[ + 0.01, + 0.025, + 0.05, + 0.075, + 0.1, + 0.15, + 0.2, + 0.3, + 0.4, + 0.5, + 0.75, + 1.0, + 2.5, + 5.0, + 7.5, + 10.0, + 20.0, + 40.0, + 80.0, + ], + labelnames=labelnames, + ) + self.histogram_time_per_output_token = make_per_engine( + histogram_time_per_output_token, engine_indexes, model_name + ) histogram_inter_token_latency = self._histogram_cls( name="vllm:inter_token_latency_seconds", @@ -966,20 +917,8 @@ class PrometheusStatLogger(AggregateStatLoggerBase): self.gauge_scheduler_waiting[engine_idx].set( scheduler_stats.num_waiting_reqs ) - if self.show_hidden_metrics: - self.gauge_gpu_cache_usage[engine_idx].set( - scheduler_stats.kv_cache_usage - ) self.gauge_kv_cache_usage[engine_idx].set(scheduler_stats.kv_cache_usage) - if self.show_hidden_metrics: - self.counter_gpu_prefix_cache_queries[engine_idx].inc( - scheduler_stats.prefix_cache_stats.queries - ) - self.counter_gpu_prefix_cache_hits[engine_idx].inc( - scheduler_stats.prefix_cache_stats.hits - ) - self.counter_prefix_cache_queries[engine_idx].inc( scheduler_stats.prefix_cache_stats.queries ) @@ -1050,7 +989,8 @@ class PrometheusStatLogger(AggregateStatLoggerBase): self.histogram_time_to_first_token[engine_idx].observe(ttft) for itl in iteration_stats.inter_token_latencies_iter: self.histogram_inter_token_latency[engine_idx].observe(itl) - self.histogram_time_per_output_token[engine_idx].observe(itl) + if self.show_hidden_metrics: + self.histogram_time_per_output_token[engine_idx].observe(itl) for finished_request in iteration_stats.finished_requests: self.counter_request_success[finished_request.finish_reason][ From 87185c88d54bd97c4c08f1fd3c5a8564e4924e2a Mon Sep 17 00:00:00 2001 From: Maryam Tahhan Date: Tue, 25 Nov 2025 03:19:52 +0000 Subject: [PATCH 25/43] =?UTF-8?q?[Bugfix]=20Make=20deprecated=20`--task=20?= =?UTF-8?q?embedding`=20consistent=20with=20`--runner=E2=80=A6=20(#29312)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Maryam Tahhan --- vllm/config/model.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git 
a/vllm/config/model.py b/vllm/config/model.py index 49688e17cf932..c37dd7c15f2a7 100644 --- a/vllm/config/model.py +++ b/vllm/config/model.py @@ -585,16 +585,26 @@ class ModelConfig: else: # task == "auto" pass else: - debug_info = { - "architectures": architectures, - "is_generative_model": is_generative_model, - "is_pooling_model": is_pooling_model, - } - raise AssertionError( - "The model should be a generative or " - "pooling model when task is set to " - f"{self.task!r}. Found: {debug_info}" - ) + # Neither generative nor pooling model - try to convert if possible + if is_pooling_task: + runner = "pooling" + convert = _task_to_convert(self.task) + msg_hint = ( + "Please replace this option with `--runner pooling " + f"--convert {convert}` to continue using this model " + "as a pooling model." + ) + else: + debug_info = { + "architectures": architectures, + "is_generative_model": is_generative_model, + "is_pooling_model": is_pooling_model, + } + raise AssertionError( + "The model should be a generative or " + "pooling model when task is set to " + f"{self.task!r}. Found: {debug_info}" + ) self.runner = runner self.convert = convert From 92effb07a48e56c531a95b696acd5f699baf16da Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 25 Nov 2025 11:28:51 +0800 Subject: [PATCH 26/43] [Model] Add HunyuanOCR support (#29327) Signed-off-by: manayang Signed-off-by: Isotr0py Signed-off-by: Roger Wang Co-authored-by: sergeywang Co-authored-by: manayang Co-authored-by: manayang Co-authored-by: Roger Wang --- docs/models/supported_models.md | 1 + examples/offline_inference/vision_language.py | 26 + tests/models/registry.py | 4 + vllm/config/model.py | 5 + .../layers/rotary_embedding/__init__.py | 13 + .../layers/rotary_embedding/xdrope.py | 102 ++ vllm/model_executor/models/hunyuan_v1.py | 11 +- vllm/model_executor/models/hunyuan_vision.py | 1028 +++++++++++++++++ vllm/model_executor/models/interfaces.py | 51 +- vllm/model_executor/models/registry.py | 4 + vllm/transformers_utils/config.py | 18 + vllm/transformers_utils/configs/__init__.py | 8 + vllm/transformers_utils/configs/hunyuan_vl.py | 322 ++++++ .../transformers_utils/processors/__init__.py | 10 +- .../processors/hunyuan_vl.py | 233 ++++ .../processors/hunyuan_vl_image.py | 477 ++++++++ vllm/v1/worker/gpu_input_batch.py | 2 + vllm/v1/worker/gpu_model_runner.py | 104 +- 18 files changed, 2415 insertions(+), 4 deletions(-) create mode 100644 vllm/model_executor/layers/rotary_embedding/xdrope.py create mode 100644 vllm/model_executor/models/hunyuan_vision.py create mode 100644 vllm/transformers_utils/configs/hunyuan_vl.py create mode 100644 vllm/transformers_utils/processors/hunyuan_vl.py create mode 100644 vllm/transformers_utils/processors/hunyuan_vl_image.py diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 404519f887dc6..25579835faf63 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -680,6 +680,7 @@ These models primarily accept the [`LLM.generate`](./generative_models.md#llmgen | `Glm4vMoeForConditionalGeneration` | GLM-4.5V | T + IE+ + VE+ | `zai-org/GLM-4.5V`, etc. | ✅︎ | ✅︎ | | `GraniteSpeechForConditionalGeneration` | Granite Speech | T + A | `ibm-granite/granite-speech-3.3-8b` | ✅︎ | ✅︎ | | `H2OVLChatModel` | H2OVL | T + IE+ | `h2oai/h2ovl-mississippi-800m`, `h2oai/h2ovl-mississippi-2b`, etc. | | ✅︎ | +| `HunYuanVLForConditionalGeneration` | HunyuanOCR | T + IE+ | `tencent/HunyuanOCR`, etc. 
| ✅︎ | ✅︎ | | `Idefics3ForConditionalGeneration` | Idefics3 | T + I | `HuggingFaceM4/Idefics3-8B-Llama3`, etc. | ✅︎ | | | `InternS1ForConditionalGeneration` | Intern-S1 | T + IE+ + VE+ | `internlm/Intern-S1`, `internlm/Intern-S1-mini`, etc. | ✅︎ | ✅︎ | | `InternVLChatModel` | InternVL 3.5, InternVL 3.0, InternVideo 2.5, InternVL 2.5, Mono-InternVL, InternVL 2.0 | T + IE+ + (VE+) | `OpenGVLab/InternVL3_5-14B`, `OpenGVLab/InternVL3-9B`, `OpenGVLab/InternVideo2_5_Chat_8B`, `OpenGVLab/InternVL2_5-4B`, `OpenGVLab/Mono-InternVL-2B`, `OpenGVLab/InternVL2-4B`, etc. | ✅︎ | ✅︎ | diff --git a/examples/offline_inference/vision_language.py b/examples/offline_inference/vision_language.py index 624de2a2debc3..65ea4df4a3099 100644 --- a/examples/offline_inference/vision_language.py +++ b/examples/offline_inference/vision_language.py @@ -538,6 +538,31 @@ def run_h2ovl(questions: list[str], modality: str) -> ModelRequestData: ) +# HunyuanOCR +def run_hunyuan_vl(questions: list[str], modality: str) -> ModelRequestData: + assert modality == "image" + + model_name = "tencent/HunyuanOCR" + + engine_args = EngineArgs( + model=model_name, + max_model_len=8192, + limit_mm_per_prompt={modality: 1}, + ) + + placeholder = "<|hy_place▁holder▁no▁100|><|hy_place▁holder▁no▁102|><|hy_place▁holder▁no▁101|>" # noqa: E501 + prompts = [ + f"<|hy_begin▁of▁sentence|>{placeholder}{question}<|hy_User|>" + for question in questions + ] + + return ModelRequestData( + engine_args=engine_args, + prompts=prompts, + stop_token_ids=None, + ) + + # naver-hyperclovax/HyperCLOVAX-SEED-Vision-Instruct-3B def run_hyperclovax_seed_vision( questions: list[str], modality: str @@ -1820,6 +1845,7 @@ model_example_map = { "glm4_5v": run_glm4_5v, "glm4_5v_fp8": run_glm4_5v_fp8, "h2ovl_chat": run_h2ovl, + "hunyuan_vl": run_hunyuan_vl, "hyperclovax_seed_vision": run_hyperclovax_seed_vision, "idefics3": run_idefics3, "interns1": run_interns1, diff --git a/tests/models/registry.py b/tests/models/registry.py index 758ec54493aa3..f8b3470e6d39b 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -626,6 +626,10 @@ _MULTIMODAL_EXAMPLE_MODELS = { "naver-hyperclovax/HyperCLOVAX-SEED-Vision-Instruct-3B", trust_remote_code=True, ), + "HunYuanVLForConditionalGeneration": _HfExamplesInfo( + "tencent/HunyuanOCR", + is_available_online=False, + ), "Idefics3ForConditionalGeneration": _HfExamplesInfo( "HuggingFaceM4/Idefics3-8B-Llama3", extras={"tiny": "HuggingFaceTB/SmolVLM-256M-Instruct"}, diff --git a/vllm/config/model.py b/vllm/config/model.py index c37dd7c15f2a7..caa9a3440c41d 100644 --- a/vllm/config/model.py +++ b/vllm/config/model.py @@ -33,6 +33,7 @@ from vllm.transformers_utils.config import ( try_get_safetensors_metadata, try_get_tokenizer_config, uses_mrope, + uses_xdrope_dim, ) from vllm.transformers_utils.gguf_utils import ( maybe_patch_hf_config_from_gguf, @@ -1615,6 +1616,10 @@ class ModelConfig: def uses_mrope(self) -> bool: return uses_mrope(self.hf_config) + @property + def uses_xdrope_dim(self) -> int: + return uses_xdrope_dim(self.hf_config) + @property def is_multimodal_model(self) -> bool: return self.multimodal_config is not None diff --git a/vllm/model_executor/layers/rotary_embedding/__init__.py b/vllm/model_executor/layers/rotary_embedding/__init__.py index 152d9401b8e94..0f10bff6ac4f5 100644 --- a/vllm/model_executor/layers/rotary_embedding/__init__.py +++ b/vllm/model_executor/layers/rotary_embedding/__init__.py @@ -17,6 +17,7 @@ from .llama4_vision_rope import Llama4VisionRotaryEmbedding from .mrope import 
MRotaryEmbedding from .ntk_scaling_rope import NTKScalingRotaryEmbedding from .phi3_long_rope_scaled_rope import Phi3LongRoPEScaledRotaryEmbedding +from .xdrope import XDRotaryEmbedding from .yarn_scaling_rope import YaRNScalingRotaryEmbedding _ROPE_DICT: dict[tuple, RotaryEmbedding] = {} @@ -184,6 +185,18 @@ def get_rope( raise ValueError( "Dynamic rope scaling must contain either 'alpha' or 'factor' field" ) + elif scaling_type == "xdrope": + scaling_alpha = rope_parameters["alpha"] + rotary_emb = XDRotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + scaling_alpha, + dtype, + xdrope_section=rope_parameters["xdrope_section"], + ) elif scaling_type == "yarn": scaling_factor = rope_parameters["factor"] original_max_position = rope_parameters["original_max_position_embeddings"] diff --git a/vllm/model_executor/layers/rotary_embedding/xdrope.py b/vllm/model_executor/layers/rotary_embedding/xdrope.py new file mode 100644 index 0000000000000..2432273faf195 --- /dev/null +++ b/vllm/model_executor/layers/rotary_embedding/xdrope.py @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import numpy as np +import torch + +from .common import apply_rotary_emb_dispatch +from .dynamic_ntk_alpha_rope import DynamicNTKAlphaRotaryEmbedding + + +class XDRotaryEmbedding(DynamicNTKAlphaRotaryEmbedding): + """DynamicNTKAlphaRotaryEmbedding extended with MultiModal(XD) Sections. + + Based on the original DynamicNTKAlphaRotaryEmbedding implementation. + """ + + def __init__( + self, + head_size: int, + rotary_dim: int, + max_position_embeddings: int, + base: float, + is_neox_style: bool, + scaling_alpha: float, + dtype: torch.dtype, + xdrope_section: list[int], + ) -> None: + self.xdrope_section = xdrope_section + super().__init__( + head_size, + rotary_dim, + max_position_embeddings, + base, + is_neox_style, + scaling_alpha, + dtype, + ) + + def forward( + self, + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor | None = None, + offsets: torch.Tensor | None = None, + ) -> tuple[torch.Tensor, torch.Tensor | None]: + """PyTorch-native implementation equivalent to forward(). 
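+
+        Illustrative sketch (section sizes assumed, not taken from a real
+        checkpoint): with xdrope_section = [16, 16, 16, 16] and
+        rotary_dim = 128, cos and sin each have shape [4, num_tokens, 64];
+        both are split into four 16-wide chunks along the last dim, chunk i
+        is taken from positions row i (P/W/H/T), and the chunks are then
+        concatenated back together.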
+ + Args: + positions: + [4, num_tokens] (P/W/H/T positions with multimodal inputs) + query: [num_tokens, num_heads * head_size] + key: [num_tokens, num_kv_heads * head_size] + """ + assert positions.ndim == 2 + assert key is not None + + num_tokens = positions.shape[-1] + cos_sin = self.cos_sin_cache[positions] + cos, sin = cos_sin.chunk(2, dim=-1) + cos = torch.cat( + [m[i] for i, m in enumerate(cos.split(self.xdrope_section, dim=-1))], dim=-1 + ) + sin = torch.cat( + [m[i] for i, m in enumerate(sin.split(self.xdrope_section, dim=-1))], dim=-1 + ) + + query_shape = query.shape + query = query.view(num_tokens, -1, self.head_size) + query_rot = query[..., : self.rotary_dim] + query_pass = query[..., self.rotary_dim :] + query_rot = apply_rotary_emb_dispatch(query_rot, cos, sin, self.is_neox_style) + query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) + + key_shape = key.shape + key = key.view(num_tokens, -1, self.head_size) + key_rot = key[..., : self.rotary_dim] + key_pass = key[..., self.rotary_dim :] + key_rot = apply_rotary_emb_dispatch(key_rot, cos, sin, self.is_neox_style) + key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) + return query, key + + @staticmethod + def get_next_input_positions( + context_len: int, + seq_len: int, + xd_sections: int = 4, + ) -> list[list[int]]: + return [list(range(context_len, seq_len)) for _ in range(xd_sections)] + + @staticmethod + def get_next_input_positions_tensor( + out: np.ndarray, + out_offset: int, + context_len: int, + num_new_tokens: int, + ): + values = np.arange( + context_len, + context_len + num_new_tokens, + dtype=out.dtype, + ) + out[:, out_offset : out_offset + num_new_tokens] = values diff --git a/vllm/model_executor/models/hunyuan_v1.py b/vllm/model_executor/models/hunyuan_v1.py index 9fa5e2bd33f21..53fb444ed622d 100644 --- a/vllm/model_executor/models/hunyuan_v1.py +++ b/vllm/model_executor/models/hunyuan_v1.py @@ -576,7 +576,16 @@ class HunYuanDecoderLayer(nn.Module): return hidden_states, residual, ori_kv_states -@support_torch_compile +@support_torch_compile( + dynamic_arg_dims={ + "input_ids": 0, + # positions is of shape (xd, seq_len) if xdrope is enabled for hunyuan-vl, + # otherwise (seq_len, ). + "positions": -1, + "intermediate_tensors": 0, + "inputs_embeds": 0, + } +) class HunYuanModel(nn.Module): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() diff --git a/vllm/model_executor/models/hunyuan_vision.py b/vllm/model_executor/models/hunyuan_vision.py new file mode 100644 index 0000000000000..e83addd0c092f --- /dev/null +++ b/vllm/model_executor/models/hunyuan_vision.py @@ -0,0 +1,1028 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# coding=utf-8 +# Copyright 2025 The HunYuan team. +# Copyright 2025 The vLLM team. +# Copyright 2025 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only HunYuan-VL model compatible with HuggingFace weights.""" + +from collections.abc import Callable, Iterable, Mapping, Sequence +from functools import partial +from typing import Annotated, Any, Literal, TypeAlias + +import torch +import torch.nn as nn +import torch.nn.functional as F +from transformers import BatchFeature + +from vllm.attention.backends.registry import AttentionBackendEnum +from vllm.attention.layer import MultiHeadAttention +from vllm.config import MultiModalConfig, VllmConfig +from vllm.config.multimodal import BaseDummyOptions +from vllm.distributed import parallel_state +from vllm.distributed import utils as dist_utils +from vllm.logger import init_logger +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import ( + ColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear, +) +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.module_mapping import MultiModelKeys +from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import ( + ImageItem, + ModalityData, + MultiModalDataDict, + MultiModalFeatureSpec, + MultiModalFieldConfig, + MultiModalKwargsItems, +) +from vllm.multimodal.parse import ( + DictEmbeddingItems, + ImageSize, + MultiModalDataItems, + MultiModalDataParser, +) +from vllm.multimodal.processing import ( + BaseMultiModalProcessor, + BaseProcessingInfo, + PromptReplacement, + PromptUpdate, +) +from vllm.multimodal.profiling import BaseDummyInputsBuilder +from vllm.sequence import IntermediateTensors +from vllm.transformers_utils.configs.hunyuan_vl import ( + HunYuanVLConfig, + HunYuanVLVisionConfig, +) +from vllm.transformers_utils.processors.hunyuan_vl import HunYuanVLProcessor +from vllm.transformers_utils.processors.hunyuan_vl_image import smart_resize +from vllm.utils.tensor_schema import TensorSchema, TensorShape + +from .interfaces import ( + MultiModalEmbeddings, + SupportsLoRA, + SupportsMultiModal, + SupportsPP, + SupportsQuant, + SupportsXDRoPE, +) +from .utils import ( + AutoWeightsLoader, + WeightsMapper, + init_vllm_registered_model, + maybe_prefix, +) + +logger = init_logger(__name__) + +# === Vision Inputs === # + + +class HunYuanVLImagePixelInputs(TensorSchema): + """ + Dimensions: + - np: Number of patches + - ni: Number of images + - cps: Number of channels * patch_size * patch_size + """ + + type: Literal["pixel_values"] + + pixel_values: Annotated[ + torch.Tensor, + TensorShape("np", "cps"), + ] + + image_grid_thw: Annotated[ + torch.Tensor, + TensorShape("ni", 3), + ] + + +class HunYuanVLImageEmbeddingInputs(TensorSchema): + """ + Dimensions: + - nf: Number of image features + - hs: Hidden size + - ni: Number of images + """ + + type: Literal["image_embeds"] + + image_embeds: Annotated[ + torch.Tensor, + TensorShape("nf", "hs"), + ] + + image_grid_thw: Annotated[ + torch.Tensor, + TensorShape("ni", 3), + ] + + +HunYuanVLImageInputs: TypeAlias = ( + 
HunYuanVLImagePixelInputs | HunYuanVLImageEmbeddingInputs +) + +# === Vision Encoder === # + + +class HunYuanVisionMLP(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: int, + bias: bool = True, + act_fn: Callable[[torch.Tensor], torch.Tensor] = F.gelu, + quant_config: QuantizationConfig | None = None, + prefix: str = "", + use_data_parallel: bool = False, + ): + super().__init__() + self.dense_h_to_4h = ColumnParallelLinear( + in_features, + hidden_features, + bias=bias, + quant_config=quant_config, + prefix=f"{prefix}.dense_h_to_4h", + disable_tp=use_data_parallel, + ) + self.dense_4h_to_h = RowParallelLinear( + hidden_features, + in_features, + bias=bias, + quant_config=quant_config, + prefix=f"{prefix}.dense_4h_to_h", + disable_tp=use_data_parallel, + ) + self.act_fn = act_fn + + def forward(self, x: torch.Tensor): + x_up, _ = self.dense_h_to_4h(x) + x_down, _ = self.dense_4h_to_h(self.act_fn(x_up)) + return x_down + + +class HunYuanVisionAttention(nn.Module): + def __init__( + self, + embed_dim: int, + num_heads: int, + projection_size: int, + quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, + prefix: str = "", + use_data_parallel: bool = False, + ) -> None: + super().__init__() + # Per attention head and per partition values. + self.tp_size = ( + 1 + if use_data_parallel + else parallel_state.get_tensor_model_parallel_world_size() + ) + self.hidden_size_per_attention_head = dist_utils.divide( + projection_size, num_heads + ) + self.num_attention_heads_per_partition = dist_utils.divide( + num_heads, self.tp_size + ) + + self.qkv = QKVParallelLinear( + hidden_size=embed_dim, + head_size=self.hidden_size_per_attention_head, + total_num_heads=num_heads, + total_num_kv_heads=num_heads, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.qkv", + disable_tp=use_data_parallel, + ) + + self.o_proj = RowParallelLinear( + input_size=projection_size, + output_size=embed_dim, + quant_config=quant_config, + prefix=f"{prefix}.o_proj", + disable_tp=use_data_parallel, + ) + + self.scale = self.hidden_size_per_attention_head**-0.5 + self.attn = MultiHeadAttention( + self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head, + self.scale, + prefix=f"{prefix}.attn", + multimodal_config=multimodal_config, + ) + + def forward( + self, + x: torch.Tensor, + ) -> torch.Tensor: + qkv, _ = self.qkv(x) + q, k, v = qkv.chunk(3, dim=-1) + out = self.attn(q, k, v) + output, _ = self.o_proj(out) + return output + + +class HunYuanVisionBlock(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + mlp_hidden_dim: int, + act_fn: Callable[[torch.Tensor], torch.Tensor] = F.gelu, + norm_layer: Callable[[int], nn.Module] | None = None, + quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, + prefix: str = "", + use_data_parallel: bool = False, + ) -> None: + super().__init__() + if norm_layer is None: + norm_layer = partial(nn.LayerNorm, eps=1e-6) + self.input_layernorm = norm_layer(dim) + self.post_attention_layernorm = norm_layer(dim) + self.self_attn = HunYuanVisionAttention( + embed_dim=dim, + num_heads=num_heads, + projection_size=dim, + quant_config=quant_config, + multimodal_config=multimodal_config, + prefix=f"{prefix}.self_attn", + use_data_parallel=use_data_parallel, + ) + self.mlp = HunYuanVisionMLP( + dim, + mlp_hidden_dim, + act_fn=act_fn, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.mlp", + use_data_parallel=use_data_parallel, 
+ ) + + def forward( + self, + x: torch.Tensor, + ) -> torch.Tensor: + x = x + self.self_attn(self.input_layernorm(x)) + x = x + self.mlp(self.post_attention_layernorm(x)) + return x + + +class HunYuanVisionPatchEmbed(nn.Module): + def __init__(self, config: HunYuanVLVisionConfig): + super().__init__() + + self.config = config + self.embed_dim = config.hidden_size + self.patch_size = config.patch_size + self.num_channels = config.num_channels + self.spatial_merge_size = config.spatial_merge_size + self.interpolate_mode = config.interpolate_mode + + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=True, + ) + + self.max_num_patches = (config.max_image_size // self.patch_size) ** 2 + + self.num_positions = self.max_num_patches + 1 + self.position_edge = int(self.num_positions**0.5) + # first token is cls token, skip it + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + + self.patch_pos_embed = None + + def forward( + self, pixel_values: torch.Tensor, grid_thw: list[list[int]] + ) -> torch.Tensor: + num_patches = pixel_values.size(0) + pixel_values = pixel_values.reshape( + num_patches, self.num_channels, self.patch_size, self.patch_size + ) + + patch_embeds = self.patch_embedding(pixel_values) + patch_embeds = patch_embeds.squeeze(-1).squeeze(-1).unsqueeze(0) + + if self.patch_pos_embed is None: + patch_pos_shape = ( + 1, + self.position_edge, + self.position_edge, + self.embed_dim, + ) + self.patch_pos_embed = ( + self.position_embedding.weight[1:, :] + .reshape(patch_pos_shape) + .permute(0, 3, 1, 2) + .float() + ) + + patch_pos_embed_list = [] + for grid in grid_thw: + _, h0, w0 = grid + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + h0, w0 = h0 + 0.1, w0 + 0.1 + patch_pos_embed = nn.functional.interpolate( + self.patch_pos_embed, + scale_factor=(h0 / self.position_edge, w0 / self.position_edge), + mode=self.interpolate_mode, + align_corners=False, + ) + + patch_pos_embed = ( + patch_pos_embed.reshape(self.embed_dim, -1) + .transpose(0, 1) + .unsqueeze(0) + .to(patch_embeds.dtype) + ) + patch_pos_embed_list.append(patch_pos_embed) + + patch_pos_embed = torch.cat(patch_pos_embed_list, dim=1) + embeddings = patch_embeds + patch_pos_embed + + return embeddings + + +class HunYuanVisionPatchMerger(nn.Module): + def __init__( + self, + in_channels, + out_channels, + spatial_merge_size=2, + rms_norm_eps=1e-5, + prefix="", + ): + super().__init__() + self.spatial_merge_size = spatial_merge_size + embed_std = out_channels**-0.5 + + self.proj = nn.Sequential( + nn.Conv2d( + in_channels, + in_channels * 2, + kernel_size=spatial_merge_size, + stride=spatial_merge_size, + ), + nn.GELU(), + nn.Conv2d(in_channels * 2, in_channels * 4, kernel_size=1), + ) + self.mlp = nn.Linear(in_channels * 4, out_channels) + + self.image_newline = nn.Parameter(torch.randn(in_channels * 4) * embed_std) + self.image_begin = nn.Parameter(torch.randn(out_channels) * embed_std) + self.image_end = nn.Parameter(torch.randn(out_channels) * embed_std) + self.image_sep = nn.Parameter(torch.randn(out_channels) * embed_std) + + self.before_rms = RMSNorm(in_channels, eps=rms_norm_eps) + self.after_rms = RMSNorm(out_channels, eps=rms_norm_eps) + + def forward(self, x, size=(16, 16)): + x = self.before_rms(x) + + h, w = size + dtype = x.dtype + x = x.permute(0, 2, 1).reshape(x.shape[0], -1, h, 
w) + + x = self.proj(x) # b,c,h,w + b, c, h, w = x.shape + x = torch.cat( + [x, self.image_newline.reshape(1, c, 1, 1).expand(b, c, h, 1).to(dtype)], + dim=-1, + ) + x = x.reshape(b, c, -1).permute(0, 2, 1) + x = self.mlp(x) + + begin = self.image_begin.reshape(1, 1, -1).expand(b, 1, x.shape[-1]).to(dtype) + end = self.image_end.reshape(1, 1, -1).expand(b, 1, x.shape[-1]).to(dtype) + x = torch.cat([begin, x, end], dim=1) + + return self.after_rms(x) + + +class HunYuanVisionTransformer(nn.Module): + def __init__( + self, + vision_config: HunYuanVLVisionConfig, + quant_config: QuantizationConfig | None = None, + prefix: str = "", + use_data_parallel: bool = False, + multimodal_config: MultiModalConfig | None = None, + attn_backend_override: AttentionBackendEnum | None = None, + ) -> None: + super().__init__() + + num_hidden_layers = vision_config.num_hidden_layers + self.hidden_size = vision_config.hidden_size + self.num_heads = vision_config.num_attention_heads + self.spatial_merge_size = vision_config.spatial_merge_size + + from vllm.compilation.backends import set_model_tag + + with set_model_tag("HunYuanVisionPatchEmbed"): + self.embeddings = HunYuanVisionPatchEmbed(vision_config) + + norm_layer = partial(nn.LayerNorm, eps=vision_config.rms_norm_eps) + + with set_model_tag("HunYuanVisionBlock"): + self.layers = nn.ModuleList( + [ + HunYuanVisionBlock( + dim=vision_config.hidden_size, + num_heads=vision_config.num_attention_heads, + mlp_hidden_dim=vision_config.intermediate_size, + act_fn=get_act_fn(vision_config.hidden_act), + norm_layer=norm_layer, + quant_config=quant_config, + multimodal_config=multimodal_config, + prefix=f"{prefix}.layers.{layer_idx}", + use_data_parallel=use_data_parallel, + ) + for layer_idx in range(num_hidden_layers) + ] + ) + + with set_model_tag("HunYuanVisionPatchMerger"): + self.perceive = HunYuanVisionPatchMerger( + vision_config.hidden_size, + vision_config.out_hidden_size, + spatial_merge_size=vision_config.spatial_merge_size, + rms_norm_eps=vision_config.rms_norm_eps, + prefix=f"{prefix}.perceive", + ) + + @property + def dtype(self) -> torch.dtype: + return self.embeddings.patch_embedding.weight.dtype + + @property + def device(self) -> torch.device: + return self.embeddings.patch_embedding.weight.device + + def forward( + self, + x: torch.Tensor, + grid_thw: list[list[int]], + ) -> torch.Tensor: + # patchify + seq_len = x.size(0) + cu_seqlens: list = [0] + + hidden_states = x.to(device=self.device, dtype=self.dtype) + hidden_states = self.embeddings(hidden_states, grid_thw) + + for t, h, w in grid_thw: + t, h, w = int(t), int(h), int(w) + cu_seqlens.append(h * w) + + cu_seqlens = torch.tensor(cu_seqlens, dtype=torch.int32) + cu_seqlens = torch.cumsum(cu_seqlens, dim=0, dtype=torch.int32) + + cu_seqlens = cu_seqlens.to(device=self.device, non_blocking=True) + + hidden_states = hidden_states.reshape(seq_len, -1) + hidden_states = hidden_states.unsqueeze(0) + for layer_num, layer in enumerate(self.layers): + hidden_states = layer(hidden_states) + + # adapter + split_lengths = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() + split_items = hidden_states.split(split_lengths, dim=1) + image_embeds_list = [] + for grid, split_item in zip(grid_thw, split_items): + image_embeds_list.append( + self.perceive(split_item.contiguous(), size=grid[1:]).squeeze(0) + ) + + return image_embeds_list + + def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + (".qkv", ".q_proj", "q"), 
+ (".qkv", ".k_proj", "k"), + (".qkv", ".v_proj", "v"), + ] + params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: set[str] = set() + + for name, loaded_weight in weights: + for param_name, weight_name, shard_id in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + + +def _hunyuan_vl_field_config(hf_inputs: Mapping[str, torch.Tensor]): + image_grid_thw = hf_inputs.get("image_grid_thw", torch.empty((0, 3))) + image_grid_sizes = image_grid_thw.prod(-1) + return dict( + pixel_values=MultiModalFieldConfig.flat_from_sizes("image", image_grid_sizes), + image_embeds=MultiModalFieldConfig.flat_from_sizes("image", image_grid_sizes), + image_grid_thw=MultiModalFieldConfig.batched("image"), + ) + + +class HunYuanVLMultiModalDataParser(MultiModalDataParser): + def _parse_image_data( + self, + data: dict[str, torch.Tensor] | ModalityData[ImageItem], + ): + if isinstance(data, dict): + return DictEmbeddingItems( + data, + modality="image", + required_fields={"image_embeds", "image_grid_thw"}, + fields_factory=_hunyuan_vl_field_config, + ) + + return super()._parse_image_data(data) + + +class HunYuanVLProcessingInfo(BaseProcessingInfo): + def get_hf_config(self): + return self.ctx.get_hf_config(HunYuanVLConfig) + + def get_hf_processor( + self, + **kwargs: object, + ) -> HunYuanVLProcessor: + return self.ctx.get_hf_processor( + HunYuanVLProcessor, + use_fast=kwargs.pop("use_fast", True), + **kwargs, + ) + + def get_image_processor( + self, + **kwargs: object, + ) -> HunYuanVLProcessor: + return self.get_hf_processor(**kwargs).image_processor + + def get_supported_mm_limits(self) -> Mapping[str, int | None]: + return {"image": None} + + def get_mm_max_tokens_per_item( + self, + seq_len: int, + mm_counts: Mapping[str, int], + ) -> Mapping[str, int]: + max_image_tokens = self.get_max_image_tokens() + # TODO: support video + max_video_tokens = 0 + return {"image": max_image_tokens, "video": max_video_tokens} + + def _get_vision_info( + self, + *, + image_width: int, + image_height: int, + num_frames: int = 1, + do_resize: bool = True, + image_processor: HunYuanVLProcessor | None, + ) -> tuple[ImageSize, int]: + if image_processor is None: + image_processor = self.get_image_processor() + + hf_config = self.get_hf_config() + vision_config = hf_config.vision_config + patch_size = vision_config.patch_size + spatial_merge_size = vision_config.spatial_merge_size + + if do_resize: + resized_height, resized_width = smart_resize( + height=image_height, + width=image_width, + factor=patch_size * spatial_merge_size, + min_pixels=image_processor.min_pixels, + max_pixels=image_processor.max_pixels, + ) + preprocessed_size = ImageSize(width=resized_width, height=resized_height) + else: + preprocessed_size = ImageSize(width=image_width, height=image_height) + + grid_t = 1 + grid_h = preprocessed_size.height // patch_size + grid_w = preprocessed_size.width // patch_size + + num_vision_tokens = ( + grid_t * grid_h // spatial_merge_size * (grid_w // spatial_merge_size + 1) + + 2 + ) + + return preprocessed_size, num_vision_tokens + + def get_num_image_tokens( + self, + *, + image_width: int, + image_height: int, + 
image_processor: HunYuanVLProcessor | None, + ) -> int: + _, num_image_tokens = self._get_vision_info( + image_width=image_width, + image_height=image_height, + image_processor=image_processor, + ) + return num_image_tokens + + def get_image_size_with_most_features(self) -> ImageSize: + max_image_size, _ = self._get_vision_info( + image_width=512, + image_height=8192, + image_processor=None, + ) + return max_image_size + + def get_max_image_tokens(self) -> int: + target_width, target_height = self.get_image_size_with_most_features() + return self.get_num_image_tokens( + image_width=target_width, + image_height=target_height, + image_processor=None, + ) + + +class HunYuanVLDummyInputsBuilder(BaseDummyInputsBuilder[HunYuanVLProcessingInfo]): + def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str: + num_images = mm_counts.get("image", 0) + + hf_processor = self.info.get_hf_processor() + image_token: str = hf_processor.image_token + + return image_token * num_images + + def get_dummy_mm_data( + self, + seq_len: int, + mm_counts: Mapping[str, int], + mm_options: Mapping[str, BaseDummyOptions] | None = None, + ) -> MultiModalDataDict: + num_images = mm_counts.get("image", 1) + + target_width, target_height = self.info.get_image_size_with_most_features() + + return { + "image": self._get_dummy_images( + width=target_width, height=target_height, num_images=num_images + ), + } + + +class HunYuanVLMultiModalProcessor(BaseMultiModalProcessor[HunYuanVLProcessingInfo]): + def _get_data_parser(self) -> MultiModalDataParser: + return HunYuanVLMultiModalDataParser() + + def _call_hf_processor( + self, + prompt: str, + mm_data: Mapping[str, object], + mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], + ) -> BatchFeature: + return self.info.ctx.call_hf_processor( + self.info.get_hf_processor(**mm_kwargs), + dict(text=prompt, **mm_data), + dict(**mm_kwargs, **tok_kwargs), + ) + + def _get_prompt_updates( + self, + mm_items: MultiModalDataItems, + hf_processor_mm_kwargs: Mapping[str, Any], + out_mm_kwargs: MultiModalKwargsItems, + ) -> Sequence[PromptUpdate]: + hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs) + image_processor = self.info.get_image_processor(**hf_processor_mm_kwargs) + + placeholder = { + "image": hf_processor.image_token_id, + } + + merge_size = image_processor.merge_size + + def get_replacement_hunyuan_vl(item_idx: int, modality: str): + out_item = out_mm_kwargs[modality][item_idx] + grid_thw = out_item[f"{modality}_grid_thw"].data + assert isinstance(grid_thw, torch.Tensor) + + _, grid_h, grid_w = grid_thw + num_tokens = (int(grid_h) // merge_size) * ( + int(grid_w) // merge_size + 1 + ) + 2 + return [placeholder[modality]] * num_tokens + + return [ + PromptReplacement( + modality=modality, + target=[placeholder[modality]], + replacement=partial(get_replacement_hunyuan_vl, modality=modality), + ) + for modality in ("image",) + ] + + def _get_mm_fields_config( + self, + hf_inputs: BatchFeature, + hf_processor_mm_kwargs: Mapping[str, object], + ) -> Mapping[str, MultiModalFieldConfig]: + return _hunyuan_vl_field_config(hf_inputs) + + +@MULTIMODAL_REGISTRY.register_processor( + HunYuanVLMultiModalProcessor, + info=HunYuanVLProcessingInfo, + dummy_inputs=HunYuanVLDummyInputsBuilder, +) +class HunYuanVLForConditionalGeneration( + nn.Module, + SupportsMultiModal, + SupportsLoRA, + SupportsPP, + SupportsQuant, + SupportsXDRoPE, +): + multimodal_cpu_fields = {"image_grid_thw"} + + # To ensure correct weight loading and mapping. 
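+    # For example, a checkpoint key "vit.patch_embed.weight" would be loaded
+    # as "visual.patch_embed.weight", and "model.layers.0.mlp..." as
+    # "language_model.model.layers.0.mlp..." (the key names here are purely
+    # illustrative; the actual names come from the checkpoint).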
+    hf_to_vllm_mapper = WeightsMapper(
+        orig_to_new_prefix={
+            # mapping for new names in checkpoint saved after transformers v4.52
+            "vit.vit.": "visual.",
+            "vit.": "visual.",
+            "model.": "language_model.model.",
+        }
+    )
+
+    supports_encoder_tp_data = True
+
+    def get_xdrope_input_positions(
+        self,
+        input_tokens: list[int],
+        mm_features: list[MultiModalFeatureSpec],
+    ) -> torch.Tensor:
+        kwargs = MultiModalFeatureSpec.gather_kwargs(
+            mm_features,
+            {"image_grid_thw"},
+        )
+        image_grid_thw = [item.tolist() for item in kwargs.get("image_grid_thw", [])]
+
+        hf_config = self.config
+        image_start_token_id = hf_config.image_start_token_id
+        spatial_merge_size = hf_config.vision_config.spatial_merge_size
+        xd_num = len(hf_config.rope_scaling["xdrope_section"])
+
+        input_tokens_tensor = torch.tensor(input_tokens)
+        image_start_indices = torch.argwhere(
+            input_tokens_tensor == image_start_token_id
+        ).squeeze(1)
+
+        p_index = torch.arange(len(input_tokens_tensor))
+        w_index = torch.arange(len(input_tokens_tensor))
+        h_index = torch.arange(len(input_tokens_tensor))
+        t_index = torch.arange(len(input_tokens_tensor))
+        for image_index in range(len(image_start_indices)):
+            # +1 is the first image token; the xdrope grid positions start at +2
+            pos = image_start_indices[image_index] + 2
+            t, h, w = image_grid_thw[image_index]
+            _, llm_grid_h, llm_grid_w = (
+                t,
+                h // spatial_merge_size,
+                w // spatial_merge_size,
+            )
+
+            token_num = (llm_grid_w + 1) * llm_grid_h
+            w_index[pos : pos + token_num].copy_(
+                torch.arange(0, llm_grid_w + 1)
+                .reshape(1, -1)
+                .expand(llm_grid_h, -1)
+                .reshape(-1)
+            )
+            h_index[pos : pos + token_num].copy_(
+                torch.arange(0, llm_grid_h)
+                .reshape(-1, 1)
+                .expand(-1, llm_grid_w + 1)
+                .reshape(-1)
+            )
+            t_index[pos : pos + token_num] = 0
+
+        if xd_num == 4:
+            llm_positions = torch.stack([p_index, w_index, h_index, t_index])
+        elif xd_num == 3:
+            llm_positions = torch.stack([w_index, h_index, t_index])
+        else:
+            raise ValueError(f"Unsupported xdrope_section length: {xd_num}")
+
+        return llm_positions
+
+    @classmethod
+    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
+        if modality.startswith("image"):
+            return "<|hy_place▁holder▁no▁100|><|hy_place▁holder▁no▁102|><|hy_place▁holder▁no▁101|>"  # noqa: E501
+
+        raise ValueError("Only image modality is supported")
+
+    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
+        super().__init__()
+        config: HunYuanVLConfig = vllm_config.model_config.hf_config
+        multimodal_config = vllm_config.model_config.multimodal_config
+
+        self.config = config
+        self.multimodal_config = multimodal_config
+
+        if multimodal_config.get_limit_per_prompt("image"):
+            attn_backend_override = (
+                multimodal_config.mm_encoder_attn_backend
+                if multimodal_config is not None
+                else None
+            )
+            self.visual = HunYuanVisionTransformer(
+                config.vision_config,
+                quant_config=self.quant_config,
+                prefix=maybe_prefix(prefix, "visual"),
+                multimodal_config=multimodal_config,
+                attn_backend_override=attn_backend_override,
+            )
+        else:
+            self.visual = None
+
+        self.language_model = init_vllm_registered_model(
+            vllm_config=vllm_config,
+            prefix=maybe_prefix(prefix, "language_model.model"),
+            architectures=[
+                "HunYuanDenseV1ForCausalLM",
+                "HunYuanMoEV1ForCausalLM",
+            ],
+        )
+
+        self.make_empty_intermediate_tensors = (
+            self.language_model.make_empty_intermediate_tensors
+        )
+
+    def _parse_and_validate_image_input(
+        self, **kwargs: object
+    ) -> HunYuanVLImageInputs | None:
+        pixel_values = kwargs.pop("pixel_values", None)
+        image_embeds = kwargs.pop("image_embeds", None)
+        image_grid_thw = kwargs.pop("image_grid_thw", None)
+
+        if pixel_values is None and image_embeds is None:
+            return None
+
+        # TODO: refine
+        if isinstance(pixel_values, list):
+            pixel_values = torch.cat(pixel_values, dim=0)
+        if len(pixel_values.shape) == 3:
+            last_dim = pixel_values.shape[-1]
+            pixel_values = pixel_values.reshape(-1, last_dim)
+            image_grid_thw = image_grid_thw.reshape(-1, 3)
+
+        if pixel_values is not None:
+            return HunYuanVLImagePixelInputs(
+                type="pixel_values",
+                pixel_values=pixel_values,
+                image_grid_thw=image_grid_thw,
+            )
+
+        if image_embeds is not None:
+            return HunYuanVLImageEmbeddingInputs(
+                type="image_embeds",
+                image_embeds=image_embeds,
+                image_grid_thw=image_grid_thw,
+            )
+
+    def _process_image_input(
+        self, image_input: HunYuanVLImageInputs
+    ) -> tuple[torch.Tensor, ...]:
+        grid_thw = image_input["image_grid_thw"]
+        assert grid_thw.ndim == 2
+        grid_thw_list = grid_thw.tolist()
+
+        if image_input["type"] == "image_embeds":
+            image_embeds = image_input["image_embeds"].type(self.visual.dtype)
+        else:
+            pixel_values = image_input["pixel_values"]
+
+            # TODO: use_data_parallel (split image_embeds in visual)
+            image_embeds = self.visual(pixel_values, grid_thw=grid_thw_list)
+
+        return image_embeds
+
+    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
+        mm_input_by_modality = {}
+
+        # Preserve the order of modalities if there are multiple of them
+        # from the order of kwargs.
+        for input_key in kwargs:
+            if (
+                input_key in ("pixel_values", "image_embeds")
+                and "image" not in mm_input_by_modality
+            ):
+                mm_input_by_modality["image"] = self._parse_and_validate_image_input(
+                    **kwargs
+                )
+        return mm_input_by_modality
+
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
+    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
+        mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
+        if not mm_input_by_modality:
+            return []
+
+        # The result multimodal_embeddings is a tuple of tensors, with each
+        # tensor corresponding to a multimodal data item (image or video).
+        multimodal_embeddings: tuple[torch.Tensor, ...] = ()
+
+        # NOTE: It is important to iterate over the keys in this dictionary
+        # to preserve the order of the modalities.
+        for modality in mm_input_by_modality:
+            multimodal_input = mm_input_by_modality[modality]
+            if modality == "image":
+                image_embeddings = self._process_image_input(multimodal_input)
+                multimodal_embeddings += tuple(image_embeddings)
+        return multimodal_embeddings
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        positions: torch.Tensor,
+        intermediate_tensors: IntermediateTensors | None,
+        inputs_embeds: torch.Tensor | None,
+        **kwargs: object,
+    ) -> torch.Tensor | IntermediateTensors:
+        if intermediate_tensors is not None:
+            inputs_embeds = None
+
+        hidden_states = self.language_model(
+            input_ids=input_ids,
+            positions=positions,
+            intermediate_tensors=intermediate_tensors,
+            inputs_embeds=inputs_embeds,
+        )
+        return hidden_states
+
+    def compute_logits(
+        self,
+        hidden_states: torch.Tensor,
+    ) -> torch.Tensor | None:
+        return self.language_model.compute_logits(hidden_states)
+
+    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
+        loader = AutoWeightsLoader(
+            self,
+            skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None),
+        )
+        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
+
+    def get_mm_mapping(self) -> MultiModelKeys:
+        """
+        Get the module prefix in multimodal models
+        """
+        return MultiModelKeys.from_string_field(
+            language_model="language_model.model",
+            connector="visual.perceive",
+            tower_model="visual",
+        )
diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py
index 9966498e1b4c9..6f6ce32538b71 100644
--- a/vllm/model_executor/models/interfaces.py
+++ b/vllm/model_executor/models/interfaces.py
@@ -1047,7 +1047,7 @@ class SupportsMRoPE(Protocol):
     supports_mrope: ClassVar[Literal[True]] = True
     """
    A flag that indicates this model supports M-RoPE.
-    
+
     Note:
         There is no need to redefine this flag if this class is in the
         MRO of your model class.
@@ -1088,3 +1088,52 @@ def supports_mrope(
     model: type[object] | object,
 ) -> TypeIs[type[SupportsMRoPE]] | TypeIs[SupportsMRoPE]:
     return isinstance(model, SupportsMRoPE)
+
+
+@runtime_checkable
+class SupportsXDRoPE(Protocol):
+    """The interface required for all models that support XD-RoPE."""
+
+    supports_xdrope: ClassVar[Literal[True]] = True
+    """
+    A flag that indicates this model supports XD-RoPE.
+
+    Note:
+        There is no need to redefine this flag if this class is in the
+        MRO of your model class.
+    """
+
+    def get_xdrope_input_positions(
+        self,
+        input_tokens: list[int],
+        mm_features: list["MultiModalFeatureSpec"],
+    ) -> torch.Tensor:
+        """
+        Get XD-RoPE input positions for this specific model.
+
+        This method should be implemented by each model that supports XD-RoPE
+        to provide model-specific logic for computing input positions.
+
+        Args:
+            input_tokens: List of input token IDs
+            mm_features: Information about each multi-modal data item
+
+        Returns:
+            llm_positions: Tensor of shape `[xdrope_dim, num_tokens]` with
+                4D(P/W/H/T) or 3D(W/H/T) positions.
+        """
+        ...
+
+
+@overload
+def supports_xdrope(model: type[object]) -> TypeIs[type[SupportsXDRoPE]]: ...
+
+
+@overload
+def supports_xdrope(model: object) -> TypeIs[SupportsXDRoPE]: ...
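+
+# Illustrative usage of this helper (names other than `supports_xdrope` and
+# `get_xdrope_input_positions` are placeholders, not real call sites):
+#
+#     if supports_xdrope(model):
+#         positions = model.get_xdrope_input_positions(input_tokens, mm_features)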
+
+
+def supports_xdrope(
+    model: type[object] | object,
+) -> TypeIs[type[SupportsXDRoPE]] | TypeIs[SupportsXDRoPE]:
+    return isinstance(model, SupportsXDRoPE)
diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py
index b3da64af750c7..a0d8a78a2ae76 100644
--- a/vllm/model_executor/models/registry.py
+++ b/vllm/model_executor/models/registry.py
@@ -287,6 +287,10 @@ _MULTIMODAL_MODELS = {
         "GraniteSpeechForConditionalGeneration",
     ),
     "H2OVLChatModel": ("h2ovl", "H2OVLChatModel"),
+    "HunYuanVLForConditionalGeneration": (
+        "hunyuan_vision",
+        "HunYuanVLForConditionalGeneration",
+    ),
     "InternVLChatModel": ("internvl", "InternVLChatModel"),
     "NemotronH_Nano_VL_V2": ("nano_nemotron_vl", "NemotronH_Nano_VL_V2"),
     "OpenCUAForConditionalGeneration": (
diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py
index 3d282da8c6112..c1880a3fba0ee 100644
--- a/vllm/transformers_utils/config.py
+++ b/vllm/transformers_utils/config.py
@@ -86,6 +86,7 @@ _CONFIG_REGISTRY: dict[str, type[PretrainedConfig]] = LazyConfigDict(
     deepseek_vl_v2="DeepseekVLV2Config",
     deepseek_v32="DeepseekV3Config",
     flex_olmo="FlexOlmoConfig",
+    hunyuan_vl="HunYuanVLConfig",
     kimi_linear="KimiLinearConfig",
     kimi_vl="KimiVLConfig",
     RefinedWeb="RWConfig",  # For tiiuae/falcon-40b(-instruct)
@@ -549,6 +550,23 @@ def thinker_uses_mrope(config: PretrainedConfig) -> bool:
     return uses_mrope(thinker_text_config)
 
 
+def uses_xdrope_dim(config: PretrainedConfig) -> int:
+    """Return the number of XD-RoPE sections if this config uses XD-RoPE, else 0."""
+    xdrope_section = getattr(config, "xdrope_section", None)
+    if xdrope_section is not None and isinstance(xdrope_section, list):
+        return len(xdrope_section)
+    rope_scaling = getattr(config, "rope_scaling", None)
+    if rope_scaling is None:
+        return 0
+
+    if isinstance(rope_scaling, dict) and "xdrope_section" in rope_scaling:
+        xdrope_section = rope_scaling["xdrope_section"]
+        if xdrope_section is not None and isinstance(xdrope_section, list):
+            return len(xdrope_section)
+
+    return 0
+
+
 def is_encoder_decoder(config: PretrainedConfig) -> bool:
     """Detect if the model with this config is used as an encoder/decoder."""
diff --git a/vllm/transformers_utils/configs/__init__.py b/vllm/transformers_utils/configs/__init__.py
index d28fd8d033373..109f2b6986514 100644
--- a/vllm/transformers_utils/configs/__init__.py
+++ b/vllm/transformers_utils/configs/__init__.py
@@ -23,6 +23,11 @@ from vllm.transformers_utils.configs.eagle import EAGLEConfig
 # `FalconConfig` class from the official HuggingFace transformers library.
from vllm.transformers_utils.configs.falcon import RWConfig from vllm.transformers_utils.configs.flex_olmo import FlexOlmoConfig +from vllm.transformers_utils.configs.hunyuan_vl import ( + HunYuanVLConfig, + HunYuanVLTextConfig, + HunYuanVLVisionConfig, +) from vllm.transformers_utils.configs.jais import JAISConfig from vllm.transformers_utils.configs.kimi_linear import KimiLinearConfig from vllm.transformers_utils.configs.kimi_vl import KimiVLConfig @@ -53,6 +58,9 @@ __all__ = [ "DotsOCRConfig", "EAGLEConfig", "FlexOlmoConfig", + "HunYuanVLConfig", + "HunYuanVLTextConfig", + "HunYuanVLVisionConfig", "RWConfig", "JAISConfig", "Lfm2MoeConfig", diff --git a/vllm/transformers_utils/configs/hunyuan_vl.py b/vllm/transformers_utils/configs/hunyuan_vl.py new file mode 100644 index 0000000000000..a826ed9b5155d --- /dev/null +++ b/vllm/transformers_utils/configs/hunyuan_vl.py @@ -0,0 +1,322 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# adapted from https://github.com/ManaEstras/transformers/blob/v4.57.1.hyvl/src/transformers/models/hunyuan_vl/configuration_hunyuan_vl.py + +from transformers import PretrainedConfig + + +class HunYuanVLVisionConfig(PretrainedConfig): + model_type = "hunyuan_vl" + base_config_key = "vision_config" + + def __init__( + self, + hidden_act="gelu", + hidden_size=1152, + intermediate_size=4304, + interpolate_mode="bilinear", + rms_norm_eps=1e-05, + learnable_mlp_pooling_size=0, + num_attention_heads=16, + num_key_value_heads=None, + num_channels=3, + num_hidden_layers=27, + out_hidden_size=4096, + patch_size=16, + remove_prenorm=True, + spatial_merge_size=2, + temporal_patch_size=1, + resize_resolution=2048, + img_max_token_num=4096, + max_image_size=2048, + video_max_image_size=768, + video_min_image_size=256, + min_image_size=512, + anyres_vit_max_image_size=2048, + max_vit_seq_len=16384, + text_hidden_size=3072, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_act = hidden_act + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.interpolate_mode = interpolate_mode + self.learnable_mlp_pooling_size = learnable_mlp_pooling_size + self.num_attention_heads = num_attention_heads + if not num_key_value_heads: + self.num_key_value_heads = num_attention_heads + else: + self.num_key_value_heads = num_key_value_heads + self.num_channels = num_channels + self.num_hidden_layers = num_hidden_layers + self.out_hidden_size = out_hidden_size + self.patch_size = patch_size + self.remove_prenorm = remove_prenorm + self.spatial_merge_size = spatial_merge_size + self.temporal_patch_size = temporal_patch_size + self.rms_norm_eps = rms_norm_eps + + self.resize_resolution = resize_resolution + self.img_max_token_num = img_max_token_num + self.max_image_size = max_image_size + self.min_image_size = min_image_size + self.video_max_image_size = video_max_image_size + self.video_min_image_size = video_min_image_size + self.anyres_vit_max_image_size = anyres_vit_max_image_size + self.max_vit_seq_len = max_vit_seq_len + self.text_hidden_size = text_hidden_size + + +class HunYuanVLTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`HunYuanVLTextConfig`]. It is used to instantiate an + HunYuan model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the HunYuan-7B. 
+ Hunyuan-7B-Instruct [tencent/Hunyuan-7B-Instruct](https://huggingface.co/tencent/Hunyuan-7B-Instruct). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 290943): + Vocabulary size of the HunYuan model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`HunYuanVLTextConfig`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations or shared MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to + `num_attention_heads`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + pad_token_id (`int`, *optional*, defaults to 0): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 1): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 2): + End of stream token id. + eod_token_id (int, *optional*, defaults to 3): + Token ID representing the end-of-document marker. Used to indicate the termination of a text sequence. + Example: In multi-document processing, this token helps the model distinguish between separate documents. + pretraining_tp (`int`, *optional*, defaults to 1): + Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this + document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is + necessary to ensure exact reproducibility of the pretraining results. Please refer to [this + issue](https://github.com/pytorch/pytorch/issues/76232). + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. 
+ rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling + strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is + `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update + `max_position_embeddings` to the expected new maximum. See the following thread for more information on how + these scaling strategies behave: + https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an + experimental feature, subject to breaking API changes in future versions. + attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): + Whether to use a bias in the query, key, value and output projection layers during self-attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + head_dim (`int`, *optional*, defaults to 128): + The attention head dimension. + """ # noqa: E501 + + model_type = "hunyuan_vl_text" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=290943, + hidden_size=4096, + intermediate_size: int = 11008, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + rms_norm_eps=1e-5, + use_cache=True, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + eod_token_id=3, + pretraining_tp=1, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + attention_bias=False, + attention_dropout=0.0, + head_dim=None, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.head_dim = head_dim + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.pretraining_tp = pretraining_tp + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + # self._rope_scaling_validation() # TODO: Need validation? + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. 
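+
+        Accepted forms (illustrative values): ``{"type": "dynamic", "alpha": 1000.0}``
+        or ``{"type": "linear", "factor": 4.0}``; the dict must contain ``type``
+        plus exactly one of ``factor`` or ``alpha``, given as a float > 1.0.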
+ """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + "`rope_scaling` must be a dictionary with with two fields, `type` and " + f"`factor` or `type` and `alpha`, got {self.rope_scaling}" + ) + rope_scaling_type = self.rope_scaling.get("type", None) + rope_scaling_factor = self.rope_scaling.get("factor", None) + rope_scaling_alpha = self.rope_scaling.get("alpha", None) + if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + raise ValueError( + "`rope_scaling`'s type field must be one of ['linear', 'dynamic'], " + f"got {rope_scaling_type}" + ) + if rope_scaling_factor is None and rope_scaling_alpha is None: + raise ValueError( + "`rope_scaling`'s factor or alpha field must be have one, " + "got both of none" + ) + if rope_scaling_factor is not None and ( + not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0 + ): + raise ValueError( + "`rope_scaling`'s factor field must be a float > 1.0, " + f"got {rope_scaling_factor}" + ) + if rope_scaling_alpha is not None and ( + not isinstance(rope_scaling_alpha, float) or rope_scaling_alpha <= 1.0 + ): + raise ValueError( + "`rope_scaling`'s alpha field must be a float > 1.0, " + f"got {rope_scaling_alpha}" + ) + + +class HunYuanVLConfig(PretrainedConfig): + model_type = "hunyuan_vl" + sub_configs = { + "vision_config": HunYuanVLVisionConfig, + "text_config": HunYuanVLTextConfig, + } + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + text_config=None, + vision_config=None, + im_start_id=120118, + im_end_id=120119, + image_token_id=120120, + im_newline_id=120121, + video_start_id=120122, + video_end_id=120123, + **kwargs, + ): + # We need to init super() here so that it does not reset values + # that are in text config to the BaseClass defaults. The Base + # config has many text related defaults and not all defaults are + # same as for `HunYuanVLTextConfig`. + super().__init__(**kwargs) + + if isinstance(vision_config, dict): + self.vision_config = self.sub_configs["vision_config"](**vision_config) + elif vision_config is None: + self.vision_config = self.sub_configs["vision_config"]() + + if isinstance(text_config, dict): + self.text_config = self.sub_configs["text_config"](**text_config) + elif text_config is None: + # For BC use all kwargs to init `TextConfig` + self.text_config = self.sub_configs["text_config"](**kwargs) + + self.image_token_id = image_token_id + self.im_start_id = im_start_id + self.im_end_id = im_end_id + self.im_newline_id = im_newline_id + self.video_start_id = video_start_id + self.video_end_id = video_end_id + + self.vision_config.text_hidden_size = self.text_config.hidden_size + + # Attention implementation to use. It sets it recursively on sub-configs + # so we call it again in the end. 
+ self._attn_implementation = kwargs.pop("attn_implementation", None) + + def __setattr__(self, key, value): + if ( + (text_config := super().__getattribute__("__dict__").get("text_config")) + is not None + and key not in ["dtype", "_attn_implementation_internal"] + and key in text_config.__dict__ + ): + setattr(text_config, key, value) + else: + super().__setattr__(key, value) + + def __getattribute__(self, key): + if "text_config" in super().__getattribute__("__dict__") and key not in [ + "_name_or_path", + "model_type", + "dtype", + "_attn_implementation_internal", + ]: + text_config = super().__getattribute__("text_config") + if key in text_config.__dict__: + return getattr(text_config, key) + + return super().__getattribute__(key) diff --git a/vllm/transformers_utils/processors/__init__.py b/vllm/transformers_utils/processors/__init__.py index 76b6d3dc9c99a..b49fdbe9ce776 100644 --- a/vllm/transformers_utils/processors/__init__.py +++ b/vllm/transformers_utils/processors/__init__.py @@ -9,7 +9,15 @@ reasons: """ from vllm.transformers_utils.processors.deepseek_vl2 import DeepseekVLV2Processor +from vllm.transformers_utils.processors.hunyuan_vl import HunYuanVLProcessor +from vllm.transformers_utils.processors.hunyuan_vl_image import HunYuanVLImageProcessor from vllm.transformers_utils.processors.ovis import OvisProcessor from vllm.transformers_utils.processors.ovis2_5 import Ovis2_5Processor -__all__ = ["DeepseekVLV2Processor", "OvisProcessor", "Ovis2_5Processor"] +__all__ = [ + "DeepseekVLV2Processor", + "HunYuanVLProcessor", + "HunYuanVLImageProcessor", + "OvisProcessor", + "Ovis2_5Processor", +] diff --git a/vllm/transformers_utils/processors/hunyuan_vl.py b/vllm/transformers_utils/processors/hunyuan_vl.py new file mode 100644 index 0000000000000..615a8bff85912 --- /dev/null +++ b/vllm/transformers_utils/processors/hunyuan_vl.py @@ -0,0 +1,233 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# adapted from https://github.com/ManaEstras/transformers/blob/v4.57.1.hyvl/src/transformers/models/hunyuan_vl/processing_hunyuan_vl.py + +import numpy as np +import torch +from transformers import AutoProcessor +from transformers.feature_extraction_utils import BatchFeature +from transformers.image_utils import ImageInput +from transformers.processing_utils import ProcessorMixin +from transformers.tokenization_utils_base import PreTokenizedInput, TextInput +from transformers.video_utils import VideoInput + + +class HunYuanVLProcessor(ProcessorMixin): + attributes = ["image_processor", "tokenizer"] + valid_kwargs = ["chat_template"] + image_processor_class = "AutoImageProcessor" + tokenizer_class = "AutoTokenizer" # ("AutoTokenizer", None) + + def __init__( + self, + image_processor=None, + tokenizer=None, + video_processor=None, + chat_template=None, + **kwargs, + ): + # TODO Fix the init + self.tokenizer = tokenizer + self.image_token_id = 120120 # self.tokenizer.image_token_id + self.image_token = self.tokenizer.convert_ids_to_tokens(self.image_token_id) + self.im_start_token_id = 120118 # self.tokenizer.im_start_id + self.im_start_token = self.tokenizer.convert_ids_to_tokens( + self.im_start_token_id + ) + self.im_end_token_id = 120119 # self.tokenizer.im_end_id + self.im_end_token = self.tokenizer.convert_ids_to_tokens(self.im_end_token_id) + self.placeholder_token = self.tokenizer.convert_ids_to_tokens( + self.tokenizer.vocab_size - 1 + ) + self.pad_id = 120002 # self.tokenizer.pad_token_id + + super().__init__( + 
image_processor, tokenizer, video_processor, chat_template=chat_template + ) + + def __call__( + self, + images: ImageInput = None, + text: TextInput + | PreTokenizedInput + | list[TextInput] + | list[PreTokenizedInput] = None, + videos: VideoInput = None, + **kwargs, + ) -> BatchFeature: + image_inputs = {} + if images is not None: + image_inputs = self.image_processor(images=images) + image_grid_thw = image_inputs["image_grid_thw"] + + if not isinstance(text, list): + text = [text] + + text = text.copy() # below lines change text in-place + + image_tokens_cumsum = [0] + if images is not None: + index = 0 + for i in range(len(text)): + while self.image_token in text[i]: + grid_h, grid_w = image_grid_thw[index][-2:] + patch_h = grid_h // self.image_processor.merge_size + patch_w = grid_w // self.image_processor.merge_size + num_image_tokens = patch_h * (patch_w + 1) + 2 + image_tokens_cumsum.append( + image_tokens_cumsum[-1] + num_image_tokens + ) + # text[i] = text[i].replace(self.image_token, self.im_start_token + self.placeholder_token * num_image_tokens + self.im_end_token, 1) # noqa: E501 + text[i] = text[i].replace( + self.image_token, self.placeholder_token * num_image_tokens, 1 + ) + index += 1 + text[i] = text[i].replace(self.placeholder_token, self.image_token) + # text[i] = self.tokenizer.bos_token + text[i] + + text_inputs = self.tokenizer(text, add_special_tokens=False, **kwargs) + self._check_special_mm_tokens(text, text_inputs, modalities=["image"]) + + input_ids = text_inputs["input_ids"] + position_ids = torch.arange(len(input_ids[0])) + position_ids_w = torch.arange(len(input_ids[0])) + position_ids_h = torch.arange(len(input_ids[0])) + position_ids_t = torch.arange(len(input_ids[0])) + + if images is not None: + image_token_pos_indices = torch.where(input_ids[0] == self.image_token_id)[ + 0 + ] + for i in range(len(image_grid_thw)): + grid_h, grid_w = image_grid_thw[i][-2:] + patch_h = grid_h // self.image_processor.merge_size + patch_w = grid_w // self.image_processor.merge_size + start_pos = image_token_pos_indices[image_tokens_cumsum[i]].item() + 1 + replace_num = (patch_w + 1) * patch_h + position_ids_w[start_pos : start_pos + replace_num] = torch.tensor( + list(range(patch_w + 1)) * patch_h, dtype=torch.int64 + ) + patch_h_list = [] + for h in range(patch_h): + patch_h_list += [h] * (patch_w + 1) + position_ids_h[start_pos : start_pos + replace_num] = torch.tensor( + patch_h_list, dtype=torch.int64 + ) + position_ids_t[start_pos : start_pos + replace_num] = 0 + + position_ids = torch.stack( + [position_ids, position_ids_w, position_ids_h, position_ids_t] + ).unsqueeze(0) + text_inputs["position_ids"] = position_ids + + attention_mask = input_ids.ne(self.pad_id) + text_inputs["attention_mask"] = attention_mask + text_inputs["imgs_pos"] = [self.get_imgs_pos(input_ids)] + # image_inputs["imgs"] = [[image_inputs["pixel_values"]]] + + return_tensors = kwargs.pop("return_tensors", None) + return BatchFeature( + data={**text_inputs, **image_inputs}, + tensor_type=return_tensors, + ) + + def batch_decode(self, *args, **kwargs): + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + return self.tokenizer.decode(*args, **kwargs) + + def post_process_image_text_to_text( + self, + generated_outputs, + skip_special_tokens=True, + clean_up_tokenization_spaces=False, + **kwargs, + ): + assert 0 + + def apply_chat_template(self, *args, **kwargs): + token_ids = self.tokenizer.apply_chat_template(*args, **kwargs) + return token_ids + + def 
get_imgs_pos(self, doc_ids): + doc_ids = np.array(doc_ids, dtype=np.int64) + img_begin_index = np.where(doc_ids == self.im_start_token_id)[0] + img_end_index = np.where(doc_ids == self.im_end_token_id)[0] + imgs_pos = np.concatenate( + ( + np.reshape(img_begin_index + 1, (-1, 1)), + np.reshape(img_end_index, (-1, 1)), + ), + axis=-1, + ).tolist() + return imgs_pos + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +def split_image_into_patch_blocks( + pixel_values: torch.Tensor, # shape: [batch_size, 3, H, W] + patch_size: int = 16, # e.g. 16 + adaptor_patch_div: int = 4, # e.g. 4 --> each patch_size is cut into 4x4 small regions, i.e. patch_size // 4 # noqa: E501 +) -> torch.Tensor: + """ + Split the input image tensor (supporting batch) into large patches of size `patch_size`, + and then further divide each large patch into smaller regions of size + (patch_size // adaptor_patch_div) x (patch_size // adaptor_patch_div). + Each small region is extracted as a tensor of shape [3, patch_size, patch_size]. + The final output contains all such small region tensors. + + Args: + pixel_values: Input image tensor of shape [batch_size, 3, H, W]. + patch_size: Size of the large patch, e.g., 16. + adaptor_patch_div: Each large patch is divided into + (patch_size // adaptor_patch_div) x (patch_size // adaptor_patch_div) + smaller regions. + + Returns: + patches: A tensor of shape [N, 3, patch_size, patch_size], + where N = batch_size * (H // patch_size) * (W // patch_size) * (patch_size // adaptor_patch_div)^2. + Each element in the batch corresponds to one small image region. 
+ """ # noqa: E501 + batch_size, channels, height, width = pixel_values.shape + assert channels == 3, "Pixel values must have 3 channels in dim=1" + assert height % patch_size == 0 and width % patch_size == 0, ( + "H and W must be divisible by patch_size" + ) + + patch_height_num = height // patch_size + patch_width_num = width // patch_size + + # Reshape to [B, 3, ph, ps, pw, ps] + img = pixel_values.reshape( + batch_size, 3, patch_height_num, patch_size, patch_width_num, patch_size + ) + + # Further split each psxps patch into (ps//aps)x(ps//aps) small regions + img = img.reshape( + batch_size, + 3, + patch_height_num, + patch_size // adaptor_patch_div, # ps // aps + adaptor_patch_div, + patch_width_num, + patch_size // adaptor_patch_div, # ps // aps + adaptor_patch_div, + ) + + # Permute to group the small regions: [B, ph, pw, ps//aps, ps//aps, 3, aps, aps] + img = img.permute(0, 2, 5, 3, 6, 1, 4, 7) + + # Reshape into [B * ph * pw * (ps//aps)^2, 3, patch_size, patch_size] + patches = img.reshape(-1, 3, patch_size, patch_size) + + return patches + + +AutoProcessor.register("HunYuanVLProcessor", HunYuanVLProcessor) diff --git a/vllm/transformers_utils/processors/hunyuan_vl_image.py b/vllm/transformers_utils/processors/hunyuan_vl_image.py new file mode 100644 index 0000000000000..0a7e7865c783a --- /dev/null +++ b/vllm/transformers_utils/processors/hunyuan_vl_image.py @@ -0,0 +1,477 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# adapted from https://github.com/ManaEstras/transformers/blob/v4.57.1.hyvl/src/transformers/models/hunyuan_vl/image_processing_hunyuan_vl.py +"""Image processor class for HunYuanVL.""" + +# isort conflicts with ruff for transformers imports +# isort: skip_file +import math + +import numpy as np +import torchvision.transforms as transforms +from transformers import AutoImageProcessor +from transformers.image_processing_utils import BaseImageProcessor, BatchFeature +from transformers.image_transforms import ( + convert_to_rgb, +) +from transformers.image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + make_flat_list_of_images, + make_list_of_images, + valid_images, + validate_preprocess_arguments, +) +from transformers.utils import TensorType, logging +from transformers.video_utils import VideoInput, make_batched_videos + +logger = logging.get_logger(__name__) + + +def smart_resize( + height: int, + width: int, + factor: int = 16, + min_pixels: int = 512 * 512, + max_pixels: int = 2048 * 2048, +): + """Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + + 3. The aspect ratio of the image is maintained as closely as possible. 
+ + """ + if max(height, width) / min(height, width) > 200: + raise ValueError( + "absolute aspect ratio must be smaller than 200, got " + f"{max(height, width) / min(height, width)}" + ) + h_bar = round(height / factor) * factor + w_bar = round(width / factor) * factor + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = max(factor, math.floor(height / beta / factor) * factor) + w_bar = max(factor, math.floor(width / beta / factor) * factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = math.ceil(height * beta / factor) * factor + w_bar = math.ceil(width * beta / factor) * factor + return h_bar, w_bar + + +class HunYuanVLImageProcessor(BaseImageProcessor): + model_input_names = [ + "pixel_values", + "image_grid_thw", + "pixel_values_videos", + "video_grid_thw", + ] + + def __init__( + self, + do_resize: bool = True, + size: dict[str, int] | None = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: int | float = 1 / 255, + do_normalize: bool = True, + image_mean: float | list[float] | None = None, + image_std: float | list[float] | None = None, + do_convert_rgb: bool = True, + min_pixels: int | None = None, + max_pixels: int | None = None, + patch_size: int = 16, + temporal_patch_size: int = 2, + merge_size: int = 2, + **kwargs, + ) -> None: + super().__init__(**kwargs) + if size is not None and ( + "shortest_edge" not in size or "longest_edge" not in size + ): + raise ValueError( + "size must contain 'shortest_edge' and 'longest_edge' keys." + ) + else: + size = {"shortest_edge": 512 * 512, "longest_edge": 2048 * 2048} + # backward compatibility: override size with min_pixels and max_pixels + # if they are provided. + if min_pixels is not None: + size["shortest_edge"] = min_pixels + if max_pixels is not None: + size["longest_edge"] = max_pixels + self.min_pixels = size["shortest_edge"] + self.max_pixels = size["longest_edge"] + self.size = size + + self.do_resize = do_resize + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + + self.patch_size = patch_size + self.temporal_patch_size = temporal_patch_size + self.merge_size = merge_size + self.do_convert_rgb = do_convert_rgb + + # hard-code + + def _preprocess( + self, + images: ImageInput | VideoInput, + do_resize: bool | None = None, + size: dict[str, int] | None = None, + resample: PILImageResampling = None, + do_rescale: bool | None = None, + rescale_factor: float | None = None, + do_normalize: bool | None = None, + image_mean: float | list[float] | None = None, + image_std: float | list[float] | None = None, + patch_size: int = 16, + temporal_patch_size: int = 2, + merge_size: int = 2, + do_convert_rgb: bool | None = None, + data_format: ChannelDimension | None = ChannelDimension.FIRST, + input_data_format: str | ChannelDimension | None = None, + ): + """ + Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`. + + Args: + images (`ImageInput`): + Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. 
size (`dict[str, int]`, *optional*, defaults to `self.size`):
+                Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
+            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image.
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Scale factor to use if rescaling the image.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
+                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
+            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
+                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
+            patch_size (`int`, *optional*, defaults to `self.patch_size`):
+                The spatial patch size of the vision encoder.
+            temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
+                The temporal patch size of the vision encoder.
+            merge_size (`int`, *optional*, defaults to `self.merge_size`):
+                The merge size of the vision encoder to llm encoder.
+            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                Whether to convert the image to RGB.
+            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
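+
+        Returns (shapes as produced by the code below): a tuple of
+        `flatten_patches` with shape
+        [grid_h * grid_w, channels * patch_size * patch_size] and the grid
+        `(grid_t, grid_h, grid_w)`.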
+ """ # noqa: E501 + images = make_list_of_images(images) + + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + width, height = images[0].width, images[0].height + resized_width, resized_height = width, height + processed_images = [] + for image in images: + if do_resize: + resized_width, resized_height = smart_resize( + width, + height, + factor=patch_size * merge_size, + min_pixels=self.min_pixels, + max_pixels=self.max_pixels, + ) + image = image.resize((resized_width, resized_height)) + + if do_normalize: + image = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize(self.image_mean, self.image_std), + ] + )(image) + processed_images.append(image) + + patches = np.array(processed_images) + channel = patches.shape[1] + grid_t = patches.shape[0] // temporal_patch_size + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + patches = patches.reshape( + 1, + channel, + grid_h // merge_size, + merge_size, + patch_size, + grid_w // merge_size, + merge_size, + patch_size, + ) + patches = patches.transpose(0, 2, 3, 5, 6, 1, 4, 7) + flatten_patches = patches.reshape( + 1 * grid_h * grid_w, channel * patch_size * patch_size + ) + + return flatten_patches, (grid_t, grid_h, grid_w) + + def preprocess( + self, + images: ImageInput, + videos: VideoInput = None, + do_resize: bool | None = None, + size: dict[str, int] | None = None, + min_pixels: int | None = None, + max_pixels: int | None = None, + resample: PILImageResampling = None, + do_rescale: bool | None = None, + rescale_factor: float | None = None, + do_normalize: bool | None = None, + image_mean: float | list[float] | None = None, + image_std: float | list[float] | None = None, + patch_size: int | None = None, + temporal_patch_size: int | None = None, + merge_size: int | None = None, + do_convert_rgb: bool | None = None, + return_tensors: str | TensorType | None = None, + data_format: ChannelDimension | None = ChannelDimension.FIRST, + input_data_format: str | ChannelDimension | None = None, + ): + """ + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + videos (`VideoInput`): + Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If + passing in videos with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. 
+ image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to + `True`. + min_pixels (`int`, *optional*, defaults to `self.min_pixels`): + The min pixels of the image to resize the image. + max_pixels (`int`, *optional*, defaults to `self.max_pixels`): + The max pixels of the image to resize the image. + patch_size (`int`, *optional*, defaults to `self.patch_size`): + The spatial patch size of the vision encoder. + temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`): + The temporal patch size of the vision encoder. + merge_size (`int`, *optional*, defaults to `self.merge_size`): + The merge size of the vision encoder to llm encoder. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + """ # noqa: E501 + min_pixels = min_pixels if min_pixels is not None else self.min_pixels + max_pixels = max_pixels if max_pixels is not None else self.max_pixels + + if size is not None: + if "shortest_edge" not in size or "longest_edge" not in size: + raise ValueError( + "size must contain 'shortest_edge' and 'longest_edge' keys." + ) + min_pixels = size["shortest_edge"] + elif min_pixels is not None and max_pixels is not None: + # backward compatibility: override size with min_pixels and max_pixels + # if they are provided. 
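+            # Precedence for resolving `size` (as implemented in this
+            # if/elif/else): an explicit `size` argument wins, then an
+            # explicit min_pixels/max_pixels pair, then the processor
+            # defaults stored in `self.size`.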
+ size = {"shortest_edge": min_pixels, "longest_edge": max_pixels} + else: + size = {**self.size} + + do_resize = do_resize if do_resize is not None else self.do_resize + + resample = resample if resample is not None else self.resample + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = ( + rescale_factor if rescale_factor is not None else self.rescale_factor + ) + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + patch_size = patch_size if patch_size is not None else self.patch_size + temporal_patch_size = ( + temporal_patch_size + if temporal_patch_size is not None + else self.temporal_patch_size + ) + merge_size = merge_size if merge_size is not None else self.merge_size + do_convert_rgb = ( + do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + ) + + if images is not None: + images = make_flat_list_of_images(images) + + if images is not None and not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + validate_preprocess_arguments( + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + + data = {} + if images is not None: + pixel_values, vision_grid_thws = [], [] + for image in images: + patches, image_grid_thw = self._preprocess( + image, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + patch_size=patch_size, + temporal_patch_size=temporal_patch_size, + merge_size=merge_size, + data_format=data_format, + do_convert_rgb=do_convert_rgb, + input_data_format=input_data_format, + ) + pixel_values.extend(patches) + vision_grid_thws.append(image_grid_thw) + pixel_values = np.array(pixel_values) + vision_grid_thws = np.array(vision_grid_thws) + data.update( + {"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws} + ) + + # kept for BC only and should be removed after v5.0 + if videos is not None: + logger.warning( + "`HunYuanVLV1ImageProcessor` works only with image inputs " + "and doesn't process videos anymore. " + "This is a deprecated behavior and will be removed in v5.0. " + "Your videos should be forwarded to `HunYuanVLV1VideoProcessor`. 
" + ) + videos = make_batched_videos(videos) + pixel_values_videos, vision_grid_thws_videos = [], [] + for images in videos: + patches, video_grid_thw = self._preprocess( + images, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + patch_size=patch_size, + temporal_patch_size=temporal_patch_size, + merge_size=merge_size, + data_format=data_format, + do_convert_rgb=do_convert_rgb, + input_data_format=input_data_format, + ) + pixel_values_videos.extend(patches) + vision_grid_thws_videos.append(video_grid_thw) + data.update( + { + "pixel_values_videos": np.array(pixel_values_videos), + "video_grid_thw": np.array(vision_grid_thws_videos), + } + ) + + return BatchFeature(data=data, tensor_type=return_tensors) + + def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): + """ + A utility that returns number of image patches for a given image size. + + Args: + height (`int`): + Height of the input image. + width (`int`): + Width of the input image. + images_kwargs (`dict`, *optional*): + Any kwargs to override defaults of the image processor. + Returns: + `int`: Number of image patches per image. + """ + min_pixels = ( + images_kwargs["min_pixels"] + if "min_pixels" in images_kwargs + else self.size["shortest_edge"] + ) + max_pixels = ( + images_kwargs["max_pixels"] + if "max_pixels" in images_kwargs + else self.size["longest_edge"] + ) + patch_size = images_kwargs.get("patch_size", self.patch_size) + merge_size = images_kwargs.get("merge_size", self.merge_size) + + factor = patch_size * merge_size + resized_height, resized_width = smart_resize( + height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels + ) + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + return grid_h * (grid_w + 1) + 2 + + +AutoImageProcessor.register("HunYuanVLImageProcessor", HunYuanVLImageProcessor) diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py index 4a2818ab1bfd8..e7991baeaa1b8 100644 --- a/vllm/v1/worker/gpu_input_batch.py +++ b/vllm/v1/worker/gpu_input_batch.py @@ -43,6 +43,8 @@ class CachedRequestState: mrope_positions: torch.Tensor | None = None mrope_position_delta: int | None = None + xdrope_positions: torch.Tensor | None = None + lora_request: LoRARequest | None = None prompt_embeds: torch.Tensor | None = None diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 6a83ac14e0b3f..6413be66b141c 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -50,16 +50,21 @@ from vllm.distributed.parallel_state import ( from vllm.forward_context import BatchDescriptor, set_forward_context from vllm.logger import init_logger from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase -from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding +from vllm.model_executor.layers.rotary_embedding import ( + MRotaryEmbedding, + XDRotaryEmbedding, +) from vllm.model_executor.model_loader import TensorizerLoader, get_model_loader from vllm.model_executor.models.interfaces import ( SupportsMRoPE, SupportsMultiModal, + SupportsXDRoPE, is_mixture_of_experts, supports_eagle3, supports_mrope, supports_multimodal_pruning, supports_transcription, + supports_xdrope, ) from vllm.model_executor.models.interfaces_base import ( VllmModelForPooling, @@ -324,6 +329,7 @@ class GPUModelRunner( # 
Multi-modal data support self.mm_registry = MULTIMODAL_REGISTRY self.uses_mrope = model_config.uses_mrope + self.uses_xdrope_dim = model_config.uses_xdrope_dim self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs( model_config ) @@ -512,6 +518,13 @@ class GPUModelRunner( (3, self.max_num_tokens + 1), dtype=torch.int64 ) + # Only relevant for models using XD-RoPE (e.g, HunYuan-VL) + if self.uses_xdrope_dim > 0: + # Similar to mrope but use assigned dimension number for RoPE, 4 as default. + self.xdrope_positions = self._make_buffer( + (self.uses_xdrope_dim, self.max_num_tokens + 1), dtype=torch.int64 + ) + # None in the first PP rank. The rest are set after load_model. self.intermediate_tensors: IntermediateTensors | None = None @@ -593,10 +606,14 @@ class GPUModelRunner( if isinstance(num_tokens, int): if self.uses_mrope: return self.mrope_positions.gpu[:, :num_tokens] + if self.uses_xdrope_dim > 0: + return self.xdrope_positions.gpu[:, :num_tokens] return self.positions.gpu[:num_tokens] else: if self.uses_mrope: return self.mrope_positions.gpu[:, num_tokens] + if self.uses_xdrope_dim > 0: + return self.xdrope_positions.gpu[:, num_tokens] return self.positions.gpu[num_tokens] def _make_buffer( @@ -772,6 +789,10 @@ class GPUModelRunner( if self.uses_mrope: self._init_mrope_positions(req_state) + # Only relevant for models using XD-RoPE (e.g, HunYuan-VL) + if self.uses_xdrope_dim > 0: + self._init_xdrope_positions(req_state) + reqs_to_add.append(req_state) # Update the states of the running/resumed requests. @@ -987,6 +1008,19 @@ class GPUModelRunner( ) ) + def _init_xdrope_positions(self, req_state: CachedRequestState): + model = self.get_model() + xdrope_model = cast(SupportsXDRoPE, model) + assert req_state.prompt_token_ids is not None, ( + "XD-RoPE requires prompt_token_ids to be available." + ) + assert supports_xdrope(model), "XD-RoPE support is not implemented." + + req_state.xdrope_positions = xdrope_model.get_xdrope_input_positions( + req_state.prompt_token_ids, + req_state.mm_features, + ) + def _extract_mm_kwargs( self, scheduler_output: "SchedulerOutput", @@ -1231,6 +1265,11 @@ class GPUModelRunner( if self.uses_mrope: self._calc_mrope_positions(scheduler_output) + # Calculate XD-RoPE positions. + # Only relevant for models using XD-RoPE (e.g, HunYuan-VL) + if self.uses_xdrope_dim > 0: + self._calc_xdrope_positions(scheduler_output) + # Get token indices. 
# E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2] @@ -1364,6 +1403,12 @@ class GPUModelRunner( self.mrope_positions.cpu[:, :total_num_scheduled_tokens], non_blocking=True, ) + elif self.uses_xdrope_dim > 0: + # Only relevant for models using XD-RoPE (e.g, HunYuan-VL) + self.xdrope_positions.gpu[:, :total_num_scheduled_tokens].copy_( + self.xdrope_positions.cpu[:, :total_num_scheduled_tokens], + non_blocking=True, + ) else: # Common case (1D positions) self.positions.copy_to_gpu(total_num_scheduled_tokens) @@ -1793,6 +1838,53 @@ class GPUModelRunner( mrope_pos_ptr += completion_part_len + def _calc_xdrope_positions(self, scheduler_output: "SchedulerOutput"): + xdrope_pos_ptr = 0 + for index, req_id in enumerate(self.input_batch.req_ids): + req = self.requests[req_id] + assert req.xdrope_positions is not None + + num_computed_tokens = self.input_batch.num_computed_tokens_cpu[index] + num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id] + num_prompt_tokens = length_from_prompt_token_ids_or_embeds( + req.prompt_token_ids, req.prompt_embeds + ) + + if num_computed_tokens + num_scheduled_tokens > num_prompt_tokens: + prompt_part_len = max(0, num_prompt_tokens - num_computed_tokens) + completion_part_len = max(0, num_scheduled_tokens - prompt_part_len) + else: + prompt_part_len = num_scheduled_tokens + completion_part_len = 0 + + assert num_scheduled_tokens == prompt_part_len + completion_part_len + + if prompt_part_len > 0: + # prompt's xdrope_positions are pre-computed + dst_start = xdrope_pos_ptr + dst_end = xdrope_pos_ptr + prompt_part_len + src_start = num_computed_tokens + src_end = num_computed_tokens + prompt_part_len + + self.xdrope_positions.cpu[:, dst_start:dst_end] = req.xdrope_positions[ + :, src_start:src_end + ] + xdrope_pos_ptr += prompt_part_len + + if completion_part_len > 0: + # compute completion's xdrope_positions on-the-fly + dst_start = xdrope_pos_ptr + dst_end = xdrope_pos_ptr + completion_part_len + + XDRotaryEmbedding.get_next_input_positions_tensor( + out=self.xdrope_positions.np, + out_offset=dst_start, + context_len=num_computed_tokens + prompt_part_len, + num_new_tokens=completion_part_len, + ) + + xdrope_pos_ptr += completion_part_len + def _calc_spec_decode_metadata( self, num_draft_tokens: np.ndarray, @@ -2037,6 +2129,7 @@ class GPUModelRunner( req_start_idx = 0 should_sync_mrope_positions = False + should_sync_xdrope_positions = False for req_id in self.input_batch.req_ids: mm_embeds_req: list[torch.Tensor] = [] @@ -2110,6 +2203,10 @@ class GPUModelRunner( self._calc_mrope_positions(scheduler_output) self.mrope_positions.copy_to_gpu(total_num_scheduled_tokens) + if should_sync_xdrope_positions: + self._calc_xdrope_positions(scheduler_output) + self.xdrope_positions.copy_to_gpu(total_num_scheduled_tokens) + return mm_embeds, is_mm_embed def get_model(self) -> nn.Module: @@ -2384,8 +2481,11 @@ class GPUModelRunner( input_ids = self.input_ids.gpu[:num_input_tokens] inputs_embeds = None model_kwargs = self._init_model_kwargs(num_input_tokens) + if self.uses_mrope: positions = self.mrope_positions.gpu[:, :num_input_tokens] + elif self.uses_xdrope_dim > 0: + positions = self.xdrope_positions.gpu[:, :num_input_tokens] else: positions = self.positions.gpu[:num_input_tokens] @@ -3824,6 +3924,8 @@ class GPUModelRunner( if self.uses_mrope: positions = self.mrope_positions.gpu[:, :num_tokens_after_padding] + elif self.uses_xdrope_dim > 0: + positions = self.xdrope_positions.gpu[:, 
:num_tokens_after_padding] else: positions = self.positions.gpu[:num_tokens_after_padding] From 81db702ed28d9a6edbd59fbd0ec039e107d36bc0 Mon Sep 17 00:00:00 2001 From: Jiangyun Zhu Date: Tue, 25 Nov 2025 12:25:20 +0800 Subject: [PATCH 27/43] [Attention] add `_cudagraph_support` for linear attention (#28934) Signed-off-by: zjy0516 --- vllm/v1/attention/backends/linear_attn.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vllm/v1/attention/backends/linear_attn.py b/vllm/v1/attention/backends/linear_attn.py index 1900c50849eca..004baa2d09cde 100644 --- a/vllm/v1/attention/backends/linear_attn.py +++ b/vllm/v1/attention/backends/linear_attn.py @@ -7,6 +7,7 @@ import torch from vllm.attention.backends.abstract import AttentionBackend from vllm.config import VllmConfig from vllm.v1.attention.backends.utils import ( + AttentionCGSupport, AttentionMetadataBuilder, CommonAttentionMetadata, split_decodes_and_prefills, @@ -35,6 +36,8 @@ class LinearAttentionMetadata: class LinearAttentionMetadataBuilder(AttentionMetadataBuilder[LinearAttentionMetadata]): reorder_batch_threshold: int = 1 + _cudagraph_support = AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE + def __init__( self, kv_cache_spec: AttentionSpec, From 2d9ee28cab204b90aa304f60fd7083ea45204bd7 Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Mon, 24 Nov 2025 23:55:57 -0500 Subject: [PATCH 28/43] [CI/Test Fix] Fix CP tests on Blackwell (#29338) Signed-off-by: Lucas Wilkinson --- vllm/attention/ops/common.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm/attention/ops/common.py b/vllm/attention/ops/common.py index 67c5f7dbba9c0..af6766bdd1615 100644 --- a/vllm/attention/ops/common.py +++ b/vllm/attention/ops/common.py @@ -194,7 +194,6 @@ def _cp_lse_common( cp_attn_lse = cp_attn_lse.contiguous() lses = cp_group.all_gather(cp_attn_lse, dim=0).view_as(lses) out, lse = correct_attn_out(cp_attn_out, lses, cp_group.rank_in_group, ctx) - assert out.is_contiguous() return out, lse From 316c8492bf4d5fca8f9f8ea6f8ef1d76a0cb940f Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 25 Nov 2025 05:24:05 +0000 Subject: [PATCH 29/43] Scheduled removal of `guided_*` config fields (#29326) Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- docs/features/structured_outputs.md | 2 +- .../llm/test_struct_output_generate.py | 29 +-- vllm/engine/arg_utils.py | 33 --- vllm/entrypoints/openai/protocol.py | 203 ++++-------------- vllm/sampling_params.py | 38 ---- 5 files changed, 43 insertions(+), 262 deletions(-) diff --git a/docs/features/structured_outputs.md b/docs/features/structured_outputs.md index e38627c707884..7d52891bea7b9 100644 --- a/docs/features/structured_outputs.md +++ b/docs/features/structured_outputs.md @@ -7,7 +7,7 @@ This document shows you some examples of the different options that are available to generate structured outputs. !!! 
warning - If you are still using the following deprecated API fields, please update your code to use `structured_outputs` as demonstrated in the rest of this document: + If you are still using the following deprecated API fields which were removed in v0.12.0, please update your code to use `structured_outputs` as demonstrated in the rest of this document: - `guided_json` -> `{"structured_outputs": {"json": ...}}` or `StructuredOutputsParams(json=...)` - `guided_regex` -> `{"structured_outputs": {"regex": ...}}` or `StructuredOutputsParams(regex=...)` diff --git a/tests/v1/entrypoints/llm/test_struct_output_generate.py b/tests/v1/entrypoints/llm/test_struct_output_generate.py index d1b037b7956cf..85f108786c05a 100644 --- a/tests/v1/entrypoints/llm/test_struct_output_generate.py +++ b/tests/v1/entrypoints/llm/test_struct_output_generate.py @@ -3,7 +3,6 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json -from dataclasses import fields from enum import Enum from typing import TYPE_CHECKING, Any @@ -21,7 +20,6 @@ from vllm.outputs import RequestOutput from vllm.platforms import current_platform from vllm.reasoning.abs_reasoning_parsers import ReasoningParserManager from vllm.sampling_params import ( - GuidedDecodingParams, SamplingParams, StructuredOutputsParams, ) @@ -108,23 +106,6 @@ class CarDescription(BaseModel): car_type: CarType -def test_guided_decoding_deprecated(): - with pytest.warns(DeprecationWarning, match="GuidedDecodingParams is deprecated.*"): - guided_decoding = GuidedDecodingParams(json_object=True) - - structured_outputs = StructuredOutputsParams(json_object=True) - assert fields(guided_decoding) == fields(structured_outputs) - - with pytest.warns(DeprecationWarning, match="guided_decoding is deprecated.*"): - sp1 = SamplingParams(guided_decoding=guided_decoding) - - with pytest.warns(DeprecationWarning, match="guided_decoding is deprecated.*"): - sp2 = SamplingParams.from_optional(guided_decoding=guided_decoding) - - assert sp1 == sp2 - assert sp1.structured_outputs == guided_decoding - - @pytest.mark.parametrize( "model_name, backend, tokenizer_mode, speculative_config", PARAMS_MODELS_BACKENDS_TOKENIZER_MODE, @@ -899,13 +880,11 @@ def test_structured_output_batched_with_non_structured_outputs_requests( output_json = json.loads(generated_text) -@pytest.mark.parametrize("guided_decoding_backend", ["xgrammar"]) -def test_structured_output_with_structural_tag( - guided_decoding_backend: str, -): +@pytest.mark.parametrize("backend", ["xgrammar"]) +def test_structured_output_with_structural_tag(backend: str): llm = LLM( model="Qwen/Qwen2.5-1.5B-Instruct", - guided_decoding_backend=guided_decoding_backend, + structured_outputs_config=StructuredOutputsConfig(backend=backend), ) structural_tag_config = { @@ -923,7 +902,7 @@ def test_structured_output_with_structural_tag( sampling_params = SamplingParams( temperature=0.0, max_tokens=500, - guided_decoding=StructuredOutputsParams( + structured_outputs=StructuredOutputsParams( structural_tag=json.dumps(structural_tag_config) ), ) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index a7c6b11ccd5a8..3cb76fc63f69c 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -502,11 +502,6 @@ class EngineArgs: ) reasoning_parser: str = StructuredOutputsConfig.reasoning_parser reasoning_parser_plugin: str | None = None - # Deprecated guided decoding fields - guided_decoding_backend: str | None = None - guided_decoding_disable_fallback: bool | None = None - 
guided_decoding_disable_any_whitespace: bool | None = None - guided_decoding_disable_additional_properties: bool | None = None logits_processor_pattern: str | None = ModelConfig.logits_processor_pattern @@ -725,19 +720,6 @@ class EngineArgs: "--reasoning-parser-plugin", **structured_outputs_kwargs["reasoning_parser_plugin"], ) - # Deprecated guided decoding arguments - for arg, type in [ - ("--guided-decoding-backend", str), - ("--guided-decoding-disable-fallback", bool), - ("--guided-decoding-disable-any-whitespace", bool), - ("--guided-decoding-disable-additional-properties", bool), - ]: - structured_outputs_group.add_argument( - arg, - type=type, - help=(f"[DEPRECATED] {arg} will be removed in v0.12.0."), - deprecated=True, - ) # Parallel arguments parallel_kwargs = get_kwargs(ParallelConfig) @@ -1712,21 +1694,6 @@ class EngineArgs: self.reasoning_parser_plugin ) - # Forward the deprecated CLI args to the StructuredOutputsConfig - so_config = self.structured_outputs_config - if self.guided_decoding_backend is not None: - so_config.guided_decoding_backend = self.guided_decoding_backend - if self.guided_decoding_disable_fallback is not None: - so_config.disable_fallback = self.guided_decoding_disable_fallback - if self.guided_decoding_disable_any_whitespace is not None: - so_config.disable_any_whitespace = ( - self.guided_decoding_disable_any_whitespace - ) - if self.guided_decoding_disable_additional_properties is not None: - so_config.disable_additional_properties = ( - self.guided_decoding_disable_additional_properties - ) - observability_config = ObservabilityConfig( show_hidden_metrics_for_version=self.show_hidden_metrics_for_version, otlp_traces_endpoint=self.otlp_traces_endpoint, diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index b352c3ad01db0..5a0a05f9af323 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -652,62 +652,6 @@ class ChatCompletionRequest(OpenAIBaseModel): default=None, description="Additional kwargs for structured outputs", ) - guided_json: str | dict | BaseModel | None = Field( - default=None, - description=( - "`guided_json` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `json` to `structured_outputs` instead." - ), - ) - guided_regex: str | None = Field( - default=None, - description=( - "`guided_regex` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `regex` to `structured_outputs` instead." - ), - ) - guided_choice: list[str] | None = Field( - default=None, - description=( - "`guided_choice` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `choice` to `structured_outputs` instead." - ), - ) - guided_grammar: str | None = Field( - default=None, - description=( - "`guided_grammar` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `grammar` to `structured_outputs` instead." - ), - ) - structural_tag: str | None = Field( - default=None, - description=( - "`structural_tag` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `structural_tag` to `structured_outputs` instead." - ), - ) - guided_decoding_backend: str | None = Field( - default=None, - description=( - "`guided_decoding_backend` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please remove it from your request." 
- ), - ) - guided_whitespace_pattern: str | None = Field( - default=None, - description=( - "`guided_whitespace_pattern` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `whitespace_pattern` to `structured_outputs` instead." - ), - ) priority: int = Field( default=0, description=( @@ -841,20 +785,6 @@ class ChatCompletionRequest(OpenAIBaseModel): if prompt_logprobs is None and self.echo: prompt_logprobs = self.top_logprobs - # Forward deprecated guided_* parameters to structured_outputs - if self.structured_outputs is None: - kwargs = dict[str, Any]( - json=self.guided_json, - regex=self.guided_regex, - choice=self.guided_choice, - grammar=self.guided_grammar, - whitespace_pattern=self.guided_whitespace_pattern, - structural_tag=self.structural_tag, - ) - kwargs = {k: v for k, v in kwargs.items() if v is not None} - if len(kwargs) > 0: - self.structured_outputs = StructuredOutputsParams(**kwargs) - response_format = self.response_format if response_format is not None: # If structured outputs wasn't already enabled, @@ -863,24 +793,23 @@ class ChatCompletionRequest(OpenAIBaseModel): self.structured_outputs = StructuredOutputsParams() # Set structured output params for response format - if response_format is not None: - if response_format.type == "json_object": - self.structured_outputs.json_object = True - elif response_format.type == "json_schema": - json_schema = response_format.json_schema - assert json_schema is not None - self.structured_outputs.json = json_schema.json_schema - elif response_format.type == "structural_tag": - structural_tag = response_format - assert structural_tag is not None and isinstance( - structural_tag, - ( - LegacyStructuralTagResponseFormat, - StructuralTagResponseFormat, - ), - ) - s_tag_obj = structural_tag.model_dump(by_alias=True) - self.structured_outputs.structural_tag = json.dumps(s_tag_obj) + if response_format.type == "json_object": + self.structured_outputs.json_object = True + elif response_format.type == "json_schema": + json_schema = response_format.json_schema + assert json_schema is not None + self.structured_outputs.json = json_schema.json_schema + elif response_format.type == "structural_tag": + structural_tag = response_format + assert structural_tag is not None and isinstance( + structural_tag, + ( + LegacyStructuralTagResponseFormat, + StructuralTagResponseFormat, + ), + ) + s_tag_obj = structural_tag.model_dump(by_alias=True) + self.structured_outputs.structural_tag = json.dumps(s_tag_obj) extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {} if self.kv_transfer_params: @@ -1140,58 +1069,6 @@ class CompletionRequest(OpenAIBaseModel): default=None, description="Additional kwargs for structured outputs", ) - guided_json: str | dict | BaseModel | None = Field( - default=None, - description=( - "`guided_json` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `json` to `structured_outputs` instead." - ), - ) - guided_regex: str | None = Field( - default=None, - description=( - "`guided_regex` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `regex` to `structured_outputs` instead." - ), - ) - guided_choice: list[str] | None = Field( - default=None, - description=( - "`guided_choice` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `choice` to `structured_outputs` instead." 
- ), - ) - guided_grammar: str | None = Field( - default=None, - description=( - "`guided_grammar` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `grammar` to `structured_outputs` instead." - ), - ) - structural_tag: str | None = Field( - default=None, - description=("If specified, the output will follow the structural tag schema."), - ) - guided_decoding_backend: str | None = Field( - default=None, - description=( - "`guided_decoding_backend` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please remove it from your request." - ), - ) - guided_whitespace_pattern: str | None = Field( - default=None, - description=( - "`guided_whitespace_pattern` is deprecated. " - "This will be removed in v0.12.0 or v1.0.0, whichever is soonest. " - "Please pass `whitespace_pattern` to `structured_outputs` instead." - ), - ) priority: int = Field( default=0, description=( @@ -1336,35 +1213,31 @@ class CompletionRequest(OpenAIBaseModel): echo_without_generation = self.echo and self.max_tokens == 0 - guided_json_object = None - if self.response_format is not None: - if self.response_format.type == "json_object": - guided_json_object = True - elif self.response_format.type == "json_schema": - json_schema = self.response_format.json_schema + response_format = self.response_format + if response_format is not None: + # If structured outputs wasn't already enabled, + # we must enable it for these features to work + if self.structured_outputs is None: + self.structured_outputs = StructuredOutputsParams() + + # Set structured output params for response format + if response_format.type == "json_object": + self.structured_outputs.json_object = True + elif response_format.type == "json_schema": + json_schema = response_format.json_schema assert json_schema is not None - self.guided_json = json_schema.json_schema - elif self.response_format.type == "structural_tag": - structural_tag = self.response_format + self.structured_outputs.json = json_schema.json_schema + elif response_format.type == "structural_tag": + structural_tag = response_format assert structural_tag is not None and isinstance( - structural_tag, StructuralTagResponseFormat + structural_tag, + ( + LegacyStructuralTagResponseFormat, + StructuralTagResponseFormat, + ), ) s_tag_obj = structural_tag.model_dump(by_alias=True) - self.structural_tag = json.dumps(s_tag_obj) - - # Forward deprecated guided_* parameters to structured_outputs - if self.structured_outputs is None: - kwargs = dict[str, Any]( - json=self.guided_json, - json_object=guided_json_object, - regex=self.guided_regex, - choice=self.guided_choice, - grammar=self.guided_grammar, - whitespace_pattern=self.guided_whitespace_pattern, - ) - kwargs = {k: v for k, v in kwargs.items() if v is not None} - if len(kwargs) > 0: - self.structured_outputs = StructuredOutputsParams(**kwargs) + self.structured_outputs.structural_tag = json.dumps(s_tag_obj) extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {} if self.kv_transfer_params: diff --git a/vllm/sampling_params.py b/vllm/sampling_params.py index fbbe3d4cabb9a..142853ff0ff0e 100644 --- a/vllm/sampling_params.py +++ b/vllm/sampling_params.py @@ -3,7 +3,6 @@ """Sampling parameters for text generation.""" import copy -import warnings from dataclasses import field from enum import Enum, IntEnum from functools import cached_property @@ -100,19 +99,6 @@ class StructuredOutputsParams: ) -@dataclass -class 
GuidedDecodingParams(StructuredOutputsParams): - def __post_init__(self): - warnings.warn( - "GuidedDecodingParams is deprecated. This will be removed in " - "v0.12.0 or v1.0.0, which ever is soonest. Please use " - "StructuredOutputsParams instead.", - DeprecationWarning, - stacklevel=2, - ) - return super().__post_init__() - - class RequestOutputKind(Enum): # Return entire output so far in every RequestOutput CUMULATIVE = 0 @@ -234,8 +220,6 @@ class SamplingParams( # Fields used to construct logits processors structured_outputs: StructuredOutputsParams | None = None """Parameters for configuring structured outputs.""" - guided_decoding: GuidedDecodingParams | None = None - """Deprecated alias for structured_outputs.""" logit_bias: dict[int, float] | None = None """If provided, the engine will construct a logits processor that applies these logit biases.""" @@ -283,7 +267,6 @@ class SamplingParams( truncate_prompt_tokens: Annotated[int, msgspec.Meta(ge=-1)] | None = None, output_kind: RequestOutputKind = RequestOutputKind.CUMULATIVE, structured_outputs: StructuredOutputsParams | None = None, - guided_decoding: GuidedDecodingParams | None = None, logit_bias: dict[int, float] | dict[str, float] | None = None, allowed_token_ids: list[int] | None = None, extra_args: dict[str, Any] | None = None, @@ -295,16 +278,6 @@ class SamplingParams( int(token): min(100.0, max(-100.0, bias)) for token, bias in logit_bias.items() } - if guided_decoding is not None: - warnings.warn( - "guided_decoding is deprecated. This will be removed in " - "v0.12.0 or v1.0.0, which ever is soonest. Please use " - "structured_outputs instead.", - DeprecationWarning, - stacklevel=2, - ) - structured_outputs = guided_decoding - guided_decoding = None return SamplingParams( n=1 if n is None else n, @@ -387,17 +360,6 @@ class SamplingParams( # eos_token_id is added to this by the engine self._all_stop_token_ids.update(self.stop_token_ids) - if self.guided_decoding is not None: - warnings.warn( - "guided_decoding is deprecated. This will be removed in " - "v0.12.0 or v1.0.0, which ever is soonest. 
Please use " - "structured_outputs instead.", - DeprecationWarning, - stacklevel=2, - ) - self.structured_outputs = self.guided_decoding - self.guided_decoding = None - if self.skip_reading_prefix_cache is None: # If prefix caching is enabled, # the output of prompt logprobs may less than n_prompt_tokens, From a21256c46327ec366b7804d22ba66ed04c2ae18b Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Tue, 25 Nov 2025 14:03:20 +0800 Subject: [PATCH 30/43] Add TP CLI argument to multimodal inference examples (#29301) Signed-off-by: Lin, Fanli --- examples/offline_inference/audio_language.py | 15 +++++++ examples/offline_inference/vision_language.py | 15 +++++++ .../vision_language_multi_image.py | 40 ++++++++++++++++--- 3 files changed, 65 insertions(+), 5 deletions(-) mode change 100644 => 100755 examples/offline_inference/audio_language.py mode change 100644 => 100755 examples/offline_inference/vision_language.py mode change 100644 => 100755 examples/offline_inference/vision_language_multi_image.py diff --git a/examples/offline_inference/audio_language.py b/examples/offline_inference/audio_language.py old mode 100644 new mode 100755 index 04e6f99f8957e..df6e96ca375fc --- a/examples/offline_inference/audio_language.py +++ b/examples/offline_inference/audio_language.py @@ -425,6 +425,13 @@ def parse_args(): default=None, help="Set the seed when initializing `vllm.LLM`.", ) + parser.add_argument( + "--tensor-parallel-size", + "-tp", + type=int, + default=None, + help="Tensor parallel size to override the model's default setting. ", + ) return parser.parse_args() @@ -434,6 +441,12 @@ def main(args): if model not in model_example_map: raise ValueError(f"Model type {model} is not supported.") + if args.tensor_parallel_size is not None and args.tensor_parallel_size < 1: + raise ValueError( + f"tensor_parallel_size must be a positive integer, " + f"got {args.tensor_parallel_size}" + ) + audio_count = args.num_audios req_data = model_example_map[model]( question_per_audio_count[audio_count], audio_count @@ -446,6 +459,8 @@ def main(args): ) engine_args = asdict(req_data.engine_args) | {"seed": args.seed} + if args.tensor_parallel_size is not None: + engine_args["tensor_parallel_size"] = args.tensor_parallel_size llm = LLM(**engine_args) # We set temperature to 0.2 so that outputs can be different diff --git a/examples/offline_inference/vision_language.py b/examples/offline_inference/vision_language.py old mode 100644 new mode 100755 index 65ea4df4a3099..8f72bf6f0b0d1 --- a/examples/offline_inference/vision_language.py +++ b/examples/offline_inference/vision_language.py @@ -2064,6 +2064,13 @@ def parse_args(): help="If True, will send all requests in a second batch with empty mm " "data to verify cache hits with UUIDs.", ) + parser.add_argument( + "--tensor-parallel-size", + "-tp", + type=int, + default=None, + help="Tensor parallel size to override the model's default setting. 
", + ) return parser.parse_args() @@ -2072,6 +2079,12 @@ def main(args): if model not in model_example_map: raise ValueError(f"Model type {model} is not supported.") + if args.tensor_parallel_size is not None and args.tensor_parallel_size < 1: + raise ValueError( + f"tensor_parallel_size must be a positive integer, " + f"got {args.tensor_parallel_size}" + ) + modality = args.modality mm_input = get_multi_modal_input(args) data = mm_input["data"] @@ -2089,6 +2102,8 @@ def main(args): "seed": args.seed, "mm_processor_cache_gb": 0 if args.disable_mm_processor_cache else 4, } + if args.tensor_parallel_size is not None: + engine_args["tensor_parallel_size"] = args.tensor_parallel_size llm = LLM(**engine_args) # Don't want to check the flag multiple times, so just hijack `prompts`. diff --git a/examples/offline_inference/vision_language_multi_image.py b/examples/offline_inference/vision_language_multi_image.py old mode 100644 new mode 100755 index 301265d4e17f7..7ba4e64b567de --- a/examples/offline_inference/vision_language_multi_image.py +++ b/examples/offline_inference/vision_language_multi_image.py @@ -1352,10 +1352,18 @@ model_example_map = { } -def run_generate(model, question: str, image_urls: list[str], seed: int | None): +def run_generate( + model, + question: str, + image_urls: list[str], + seed: int | None, + tensor_parallel_size: int | None, +): req_data = model_example_map[model](question, image_urls) - engine_args = asdict(req_data.engine_args) | {"seed": args.seed} + engine_args = asdict(req_data.engine_args) | {"seed": seed} + if tensor_parallel_size is not None: + engine_args["tensor_parallel_size"] = tensor_parallel_size llm = LLM(**engine_args) sampling_params = SamplingParams( @@ -1378,7 +1386,13 @@ def run_generate(model, question: str, image_urls: list[str], seed: int | None): print("-" * 50) -def run_chat(model: str, question: str, image_urls: list[str], seed: int | None): +def run_chat( + model: str, + question: str, + image_urls: list[str], + seed: int | None, + tensor_parallel_size: int | None, +): req_data = model_example_map[model](question, image_urls) # Disable other modalities to save memory @@ -1388,6 +1402,8 @@ def run_chat(model: str, question: str, image_urls: list[str], seed: int | None) ) engine_args = asdict(req_data.engine_args) | {"seed": seed} + if tensor_parallel_size is not None: + engine_args["tensor_parallel_size"] = tensor_parallel_size llm = LLM(**engine_args) sampling_params = ( @@ -1463,6 +1479,13 @@ def parse_args(): default=2, help="Number of images to use for the demo.", ) + parser.add_argument( + "--tensor-parallel-size", + "-tp", + type=int, + default=None, + help="Tensor parallel size to override the model's default setting. 
", + ) return parser.parse_args() @@ -1470,13 +1493,20 @@ def main(args: Namespace): model = args.model_type method = args.method seed = args.seed + tensor_parallel_size = args.tensor_parallel_size + + if tensor_parallel_size is not None and tensor_parallel_size < 1: + raise ValueError( + f"tensor_parallel_size must be a positive integer, " + f"got {tensor_parallel_size}" + ) image_urls = IMAGE_URLS[: args.num_images] if method == "generate": - run_generate(model, QUESTION, image_urls, seed) + run_generate(model, QUESTION, image_urls, seed, tensor_parallel_size) elif method == "chat": - run_chat(model, QUESTION, image_urls, seed) + run_chat(model, QUESTION, image_urls, seed, tensor_parallel_size) else: raise ValueError(f"Invalid method: {method}") From ce58fdc1c366b0257c2b2d8310b14d4ea8f8dd30 Mon Sep 17 00:00:00 2001 From: kflu Date: Mon, 24 Nov 2025 22:39:29 -0800 Subject: [PATCH 31/43] Fix PoolingParams.skip_reading_prefix_cache type (#29364) Signed-off-by: KFL --- vllm/pooling_params.py | 2 +- vllm/sampling_params.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/pooling_params.py b/vllm/pooling_params.py index 5c3dfa8ac9cbc..d1aab98c274e1 100644 --- a/vllm/pooling_params.py +++ b/vllm/pooling_params.py @@ -57,7 +57,7 @@ class PoolingParams( ## Internal use only task: PoolingTask | None = None requires_token_ids: bool = False - skip_reading_prefix_cache: bool = None + skip_reading_prefix_cache: bool | None = None extra_kwargs: dict[str, Any] | None = None output_kind: RequestOutputKind = RequestOutputKind.FINAL_ONLY diff --git a/vllm/sampling_params.py b/vllm/sampling_params.py index 142853ff0ff0e..8de961e62db1b 100644 --- a/vllm/sampling_params.py +++ b/vllm/sampling_params.py @@ -238,7 +238,7 @@ class SamplingParams( generated token can complete the sequence.""" _bad_words_token_ids: list[list[int]] | None = None - skip_reading_prefix_cache: bool = None + skip_reading_prefix_cache: bool | None = None @staticmethod def from_optional( From 40a6f53f6c09cd15b07436acc8d631a3a86f7416 Mon Sep 17 00:00:00 2001 From: Inoki Date: Tue, 25 Nov 2025 07:40:06 +0100 Subject: [PATCH 32/43] Display warning only when ROCm version is less than Pytorch required version (#29200) Signed-off-by: Inoki --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a4cf51d17e982..86746a0db4c0e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -136,7 +136,7 @@ elseif(HIP_FOUND) # ROCm 5.X and 6.X if (ROCM_VERSION_DEV_MAJOR GREATER_EQUAL 5 AND - NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM}) + Torch_VERSION VERSION_LESS ${TORCH_SUPPORTED_VERSION_ROCM}) message(WARNING "Pytorch version >= ${TORCH_SUPPORTED_VERSION_ROCM} " "expected for ROCm build, saw ${Torch_VERSION} instead.") endif() From 7992324f23478bebf5e39542a4ce198cd7a1ab2a Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Mon, 24 Nov 2025 22:55:16 -0800 Subject: [PATCH 33/43] [BugFix] Use unique ids for different transcription prompts (#29372) Signed-off-by: Nick Hill --- vllm/entrypoints/openai/speech_to_text.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/entrypoints/openai/speech_to_text.py b/vllm/entrypoints/openai/speech_to_text.py index b9b9b1ab30ad8..3dece07748cc4 100644 --- a/vllm/entrypoints/openai/speech_to_text.py +++ b/vllm/entrypoints/openai/speech_to_text.py @@ -201,10 +201,10 @@ class OpenAISpeechToText(OpenAIServing): self.engine_client.generate( prompt, sampling_params, - request_id, + 
f"{request_id}_{i}", lora_request=lora_request, ) - for prompt in prompts + for i, prompt in enumerate(prompts) ] except ValueError as e: # TODO: Use a vllm-specific Validation Error From 64deead719cc181a1930982b0a5f4d280c284156 Mon Sep 17 00:00:00 2001 From: vllmellm Date: Tue, 25 Nov 2025 14:56:06 +0800 Subject: [PATCH 34/43] [Bugfix] [ROCm] [UX]: revert Flex attention backend (#29371) Signed-off-by: vllmellm --- .../v1/attention/test_rocm_attention_backends_selection.py | 6 ++++++ vllm/platforms/rocm.py | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/tests/v1/attention/test_rocm_attention_backends_selection.py b/tests/v1/attention/test_rocm_attention_backends_selection.py index 4ec79e9eb6ba4..80158d4b7278c 100644 --- a/tests/v1/attention/test_rocm_attention_backends_selection.py +++ b/tests/v1/attention/test_rocm_attention_backends_selection.py @@ -36,6 +36,12 @@ def mock_on_gfx9(): @pytest.mark.parametrize( "env_vars, selected_backend, expected_backend_path", [ + # Test Case: Explicit FLEX_ATTENTION backend + ( + {}, + "FLEX_ATTENTION", + AttentionBackendEnum.FLEX_ATTENTION.get_path(), + ), # Test Case 1: Default (no env vars, no explicit backend) ( {}, diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index f3ec965bd0881..b0434b9642f07 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -262,6 +262,10 @@ class RocmPlatform(Platform): f"is not MLA type while requested for MLA backend." ) + if selected_backend == AttentionBackendEnum.FLEX_ATTENTION: + logger.info("Using FlexAttention backend.") + return AttentionBackendEnum.FLEX_ATTENTION.get_path() + if selected_backend == AttentionBackendEnum.TRITON_ATTN: logger.info("Using Triton Attention backend on V1 engine.") return AttentionBackendEnum.TRITON_ATTN.get_path() From 98caeadd54599c8038fab5b19cc8ef5688b7b03a Mon Sep 17 00:00:00 2001 From: Fadi Arafeh <115173828+fadara01@users.noreply.github.com> Date: Tue, 25 Nov 2025 07:11:11 +0000 Subject: [PATCH 35/43] [fix][cpu] Use a SwigluOAI impl which supports interleaved gate-up wei (#29273) Signed-off-by: Fadi Arafeh --- .../layers/fused_moe/cpu_fused_moe.py | 29 +++++-------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/vllm/model_executor/layers/fused_moe/cpu_fused_moe.py b/vllm/model_executor/layers/fused_moe/cpu_fused_moe.py index 572307052b489..659a2d4ee5b39 100644 --- a/vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/cpu_fused_moe.py @@ -6,22 +6,7 @@ import torch from torch.nn import functional as F from vllm import _custom_ops as ops - - -def silu_and_mul(x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - return F.silu(x[..., :d]) * x[..., d:] - - -def swigluoai_and_mul( - x: torch.Tensor, alpha: float = 1.702, limit: float = 7.0 -) -> torch.Tensor: - d = x.shape[-1] // 2 - gate, up = x[..., :d], x[..., d:] - gate = gate.clamp(max=limit) - up = up.clamp(min=-limit, max=limit) - glu = gate * torch.sigmoid(alpha * gate) - return (up + 1) * glu +from vllm.model_executor.layers.activation import SiluAndMul, SwigluOAIAndMul def grouped_topk( @@ -227,6 +212,11 @@ class CPUFusedMOE: layer.w13_weight = torch.nn.Parameter(torch.empty(0), requires_grad=False) layer.w2_weight = torch.nn.Parameter(torch.empty(0), requires_grad=False) + self.act_to_impl = { + "silu": SiluAndMul(), + "swigluoai": SwigluOAIAndMul(), + } + def __call__( self, layer: torch.nn.Module, @@ -246,7 +236,7 @@ class CPUFusedMOE: apply_router_weight_on_input: bool = False, activation: str = "silu", ) -> 
torch.Tensor: - assert activation in {"silu", "swigluoai"}, f"{activation} is not supported." + assert activation in self.act_to_impl, f"{activation} is not supported." assert not apply_router_weight_on_input topk_weights, topk_ids = select_experts( hidden_states=x, @@ -283,10 +273,7 @@ class CPUFusedMOE: tokens_for_this_expert = sorted_tokens[start_idx:end_idx] gate_up = layer.gate_up_linear[i](tokens_for_this_expert) - if activation == "swigluoai": - gate_up = swigluoai_and_mul(gate_up) - else: - gate_up = silu_and_mul(gate_up) + gate_up = self.act_to_impl[activation].forward_native(gate_up) expert_out = layer.down_linear[i](gate_up) outputs.append(expert_out) start_idx = end_idx From fe3a4f5b347c64f1d5f2cb10990437a56f720660 Mon Sep 17 00:00:00 2001 From: Ryan Rock Date: Tue, 25 Nov 2025 01:14:59 -0600 Subject: [PATCH 36/43] [CI/Build] Pin torchgeo dependency for AMD (#29353) Signed-off-by: Ryan Rock --- requirements/rocm-test.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements/rocm-test.txt b/requirements/rocm-test.txt index f9bddc23420b4..8a91b59de6f72 100644 --- a/requirements/rocm-test.txt +++ b/requirements/rocm-test.txt @@ -45,6 +45,7 @@ multiprocess==0.70.16 # Plugins test terratorch @ git+https://github.com/IBM/terratorch.git@07184fcf91a1324f831ff521dd238d97fe350e3e +torchgeo==0.7.0 # Required for suffix decoding test -arctic-inference == 0.1.1 \ No newline at end of file +arctic-inference == 0.1.1 From 888152bf87d62c9f5929d06f386068990b618db7 Mon Sep 17 00:00:00 2001 From: Icey <1790571317@qq.com> Date: Tue, 25 Nov 2025 15:25:15 +0800 Subject: [PATCH 37/43] Allow oot custom compiler extension via CompilerInterface (#28623) Signed-off-by: wxsIcey <1790571317@qq.com> Signed-off-by: Mengqing Cao Signed-off-by: Icey <1790571317@qq.com> Co-authored-by: Mengqing Cao --- vllm/compilation/backends.py | 34 +++++++++++++++++----------------- vllm/config/compilation.py | 12 +++++------- vllm/platforms/interface.py | 20 ++++++++++++++++++++ 3 files changed, 42 insertions(+), 24 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 1e66f21ff6388..2d8dd4c51c7ef 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -63,13 +63,14 @@ def make_compiler(compilation_config: CompilationConfig) -> CompilerInterface: else: logger.debug("Using InductorAdaptor") return InductorAdaptor() - else: - assert compilation_config.backend == "eager", ( - "Custom backends not supported with CompilationMode.VLLM_COMPILE" - ) - + elif compilation_config.backend == "eager": logger.debug("Using EagerAdaptor") return EagerAdaptor() + else: + logger.debug("Using custom backend: %s", compilation_config.backend) + compiler = resolve_obj_by_qualname(current_platform.get_compile_backend())() + assert isinstance(compiler, CompilerInterface) + return compiler class CompilerManager: @@ -545,7 +546,10 @@ class VllmBackend: self.prefix = prefix or model_tag # Passes to run on the graph post-grad. - self.post_grad_pass_manager = PostGradPassManager() + self.pass_manager = resolve_obj_by_qualname( + current_platform.get_pass_manager_cls() + )() + self.pass_key = current_platform.pass_key self.sym_tensor_indices = [] self.input_buffers = [] @@ -562,24 +566,20 @@ class VllmBackend: def configure_post_pass(self): config = self.compilation_config - self.post_grad_pass_manager.configure(self.vllm_config) + self.pass_manager.configure(self.vllm_config) # Post-grad custom passes are run using the post_grad_custom_post_pass # hook. 
If a pass for that hook exists, add it to the pass manager. inductor_config = config.inductor_compile_config - PASS_KEY = "post_grad_custom_post_pass" - if PASS_KEY in inductor_config: - if isinstance(inductor_config[PASS_KEY], PostGradPassManager): + if self.pass_key in inductor_config: + if isinstance(inductor_config[self.pass_key], PostGradPassManager): # PassManager already added to config, make sure it's correct - assert ( - inductor_config[PASS_KEY].uuid() - == self.post_grad_pass_manager.uuid() - ) + assert inductor_config[self.pass_key].uuid() == self.pass_manager.uuid() else: # Config should automatically wrap all inductor passes - assert isinstance(inductor_config[PASS_KEY], InductorPass) - self.post_grad_pass_manager.add(inductor_config[PASS_KEY]) - inductor_config[PASS_KEY] = self.post_grad_pass_manager + assert isinstance(inductor_config[self.pass_key], InductorPass) + self.pass_manager.add(inductor_config[self.pass_key]) + inductor_config[self.pass_key] = self.pass_manager def __call__( self, graph: fx.GraphModule, example_inputs diff --git a/vllm/config/compilation.py b/vllm/config/compilation.py index 42eccf9f41123..556b2d9168b32 100644 --- a/vllm/config/compilation.py +++ b/vllm/config/compilation.py @@ -331,9 +331,9 @@ class CompilationConfig: We use string to avoid serialization issues when using compilation in a distributed setting. When the compilation mode is 1 or 2, the backend is used for the compilation directly (it sees the whole graph). When the - compilation mode is 3, the backend is used for the piecewise compilation - (it sees a part of the graph). The backend can not be custom for compilation - mode 3, i.e. the backend must be either eager or inductor. Furthermore, + compilation mode is 3, the backend supports both whole graph and piecewise + compilation, available backends include eager, inductor, and custom backends, + the latter of which can be defined via `get_compile_backend`. Furthermore, compilation is only piecewise if splitting ops is set accordingly and use_inductor_graph_partition is off. Note that the default options for splitting ops are sufficient for piecewise compilation. @@ -768,7 +768,7 @@ class CompilationConfig: self.backend = "inductor" if self.use_inductor else "eager" if self.backend == "": - self.backend = current_platform.simple_compile_backend + self.backend = current_platform.get_compile_backend() def init_backend(self, vllm_config: "VllmConfig") -> str | Callable: """ @@ -800,9 +800,7 @@ class CompilationConfig: assert self.mode == CompilationMode.VLLM_COMPILE if self.backend not in ["eager", "inductor"]: - raise ValueError( - f"Invalid backend for piecewise compilation: {self.backend}" - ) + logger.info("Using OOT custom backend for compilation.") from vllm.compilation.backends import VllmBackend diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 0471c20429b1d..1e6b53021f888 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -134,6 +134,11 @@ class Platform: _global_graph_pool: Any | None = None + @property + def pass_key(self) -> str: + """Inductor config key for the PassManager custom pass""" + return "post_grad_custom_post_pass" + @property def supported_dtypes(self) -> list[torch.dtype]: """Returns the supported dtypes for the current platform.""" @@ -177,6 +182,21 @@ class Platform: # all ROCm platforms for now. 
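The two `Platform` hooks this patch introduces (see the classmethods added a little further below) are the override points for an out-of-tree device plugin that wants its own compiler and pass manager. A hypothetical plugin could look like the following; the hook names come from this diff, while the module paths and class names are purely illustrative:

```python
from vllm.platforms.interface import Platform

class MyAcceleratorPlatform(Platform):
    @classmethod
    def get_compile_backend(cls) -> str:
        # make_compiler() resolves this dotted path with
        # resolve_obj_by_qualname() and instantiates it; the resulting
        # object must implement CompilerInterface.
        return "my_plugin.compiler.MyCompilerAdaptor"

    @classmethod
    def get_pass_manager_cls(cls) -> str:
        # VllmBackend registers an instance of this class in the inductor
        # config under current_platform.pass_key
        # ("post_grad_custom_post_pass" by default).
        return "my_plugin.passes.MyPassManager"
```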
return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM) + @classmethod + def get_pass_manager_cls(cls) -> str: + """ + Get the pass manager class for this platform. + It will be registered as a custom pass under the current_platform.pass_key. + """ + return "vllm.compilation.pass_manager.PostGradPassManager" + + @classmethod + def get_compile_backend(cls) -> str: + """ + Get the custom compile backend for current platform. + """ + return cls.simple_compile_backend + @classmethod def device_id_to_physical_device_id(cls, device_id: int): # Treat empty device control env var as unset. This is a valid From f242cfcdd5f1db4e005503a02a1317369d2a8e3d Mon Sep 17 00:00:00 2001 From: zhrrr <43847754+izhuhaoran@users.noreply.github.com> Date: Tue, 25 Nov 2025 15:31:07 +0800 Subject: [PATCH 38/43] [Perf] use cpu all reduce to avoid sync when async_scheduling & dp > 1 (#29311) Signed-off-by: zhuhaoran --- vllm/engine/arg_utils.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 3cb76fc63f69c..8338e54d4fd85 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1570,6 +1570,12 @@ class EngineArgs: model_config.skip_tokenizer_init = True logger.info("Skipping tokenizer initialization for tokens-only mode.") + if self.async_scheduling and not self.disable_nccl_for_dp_synchronization: + logger.info( + "Disabling NCCL for DP synchronization when using async scheduling." + ) + self.disable_nccl_for_dp_synchronization = True + # Forward the deprecated CLI args to the EPLB config. if self.num_redundant_experts is not None: self.eplb_config.num_redundant_experts = self.num_redundant_experts From 12c007e288bf5c0ae3bd438036fbafbad88e706b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Delacourt?= <54138269+Flechman@users.noreply.github.com> Date: Tue, 25 Nov 2025 08:32:21 +0100 Subject: [PATCH 39/43] EAGLE Support DP>1 (#26086) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rémi Delacourt Signed-off-by: Rémi Delacourt <54138269+Flechman@users.noreply.github.com> Signed-off-by: remi --- .buildkite/test-pipeline.yaml | 2 + tests/v1/distributed/test_eagle_dp.py | 77 ++++++++++++++++ vllm/v1/spec_decode/eagle.py | 123 +++++++++++++++++++------- vllm/v1/worker/gpu_model_runner.py | 5 +- 4 files changed, 176 insertions(+), 31 deletions(-) create mode 100644 tests/v1/distributed/test_eagle_dp.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index f1cd39ef4f948..e88e693a2dda5 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -192,6 +192,7 @@ steps: # test with internal dp - python3 ../examples/offline_inference/data_parallel.py --enforce-eager - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/distributed/test_async_llm_dp.py + - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/distributed/test_eagle_dp.py - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/distributed/test_external_lb_dp.py - TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/distributed/test_internal_lb_dp.py - TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/distributed/test_hybrid_lb_dp.py @@ -1116,6 +1117,7 @@ steps: # https://github.com/NVIDIA/nccl/issues/1838 - export NCCL_CUMEM_HOST_ENABLE=0 - TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_async_llm_dp.py + - TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_eagle_dp.py - TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_external_lb_dp.py - DP_SIZE=2 pytest -v -s v1/entrypoints/openai/test_multi_api_servers.py - pytest -v -s 
entrypoints/llm/test_collective_rpc.py diff --git a/tests/v1/distributed/test_eagle_dp.py b/tests/v1/distributed/test_eagle_dp.py new file mode 100644 index 0000000000000..9f6a6614fc1fd --- /dev/null +++ b/tests/v1/distributed/test_eagle_dp.py @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import asyncio +import os +from contextlib import AsyncExitStack +from dataclasses import replace + +import pytest + +from vllm import SamplingParams +from vllm.engine.arg_utils import AsyncEngineArgs +from vllm.sampling_params import RequestOutputKind +from vllm.v1.engine.async_llm import AsyncLLM + +DP_SIZE = int(os.getenv("DP_SIZE", 2)) + + +@pytest.mark.asyncio +async def test_run_eagle_dp(): + target_model = "meta-llama/Llama-3.1-8B-Instruct" + draft_model = "yuhuili/EAGLE-LLaMA3.1-Instruct-8B" + + engine_args = AsyncEngineArgs( + model=target_model, + tokenizer_mode="auto", + enforce_eager=False, + tensor_parallel_size=int(os.getenv("TP_SIZE", 1)), + data_parallel_size=DP_SIZE, + data_parallel_backend="mp", # ray takes more time + trust_remote_code=True, + max_model_len=16384, + ) + + eagle_engine_args = replace( + engine_args, + speculative_config={ + "model": draft_model, + "method": "eagle", + "num_speculative_tokens": 3, + }, + ) + + prompt = "This is a test of data parallel with eagle" + num_expected_tokens = 100 + sampling_params = SamplingParams( + min_tokens=num_expected_tokens, + max_tokens=num_expected_tokens, + ignore_eos=True, + output_kind=RequestOutputKind.FINAL_ONLY, + temperature=0, + ) + + async def generate_with_timeout(given_engine: AsyncLLM): + async for out in given_engine.generate( + request_id="test-eagle-dp", prompt=prompt, sampling_params=sampling_params + ): + token_ids = out.outputs[0].token_ids + assert len(token_ids) == num_expected_tokens + return token_ids + + async def engine_create_and_generate(engine_args: AsyncEngineArgs): + async with AsyncExitStack() as after: + engine = AsyncLLM.from_engine_args(engine_args) + after.callback(engine.shutdown) + + token_ids = await asyncio.wait_for( + generate_with_timeout(engine), timeout=30 + ) + + assert not engine.output_processor.has_unfinished_requests() + return token_ids + + token_ids_with_eagle = await engine_create_and_generate(eagle_engine_args) + token_ids_no_eagle = await engine_create_and_generate(engine_args) + + # Test for correctness + assert token_ids_with_eagle == token_ids_no_eagle diff --git a/vllm/v1/spec_decode/eagle.py b/vllm/v1/spec_decode/eagle.py index afa16573eea10..784ccbc04932f 100644 --- a/vllm/v1/spec_decode/eagle.py +++ b/vllm/v1/spec_decode/eagle.py @@ -40,6 +40,7 @@ from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.sample.sampler import _SAMPLING_EPS from vllm.v1.spec_decode.metadata import SpecDecodeMetadata from vllm.v1.utils import CpuGpuBuffer +from vllm.v1.worker.dp_utils import coordinate_batch_across_dp from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch logger = init_logger(__name__) @@ -65,6 +66,7 @@ class EagleProposer: self.dtype = vllm_config.model_config.dtype self.max_model_len = vllm_config.model_config.max_model_len self.block_size = vllm_config.cache_config.block_size + self.dp_rank = vllm_config.parallel_config.data_parallel_rank self.num_speculative_tokens = self.speculative_config.num_speculative_tokens self.max_num_tokens = vllm_config.scheduler_config.max_num_batched_tokens self.token_arange_np = np.arange(self.max_num_tokens) @@ -271,15 +273,24 @@ class 
EagleProposer: assert draft_indexer_metadata is not None per_layer_attn_metadata[layer_name] = draft_indexer_metadata + num_tokens_dp_padded, num_tokens_across_dp = self._pad_batch_across_dp( + num_tokens_unpadded=num_tokens, + num_tokens_padded=num_tokens, + ) + cudagraph_runtime_mode = CUDAGraphMode.NONE if ( self.use_cuda_graph - and num_tokens <= self.compilation_config.max_cudagraph_capture_size + and num_tokens_dp_padded + <= self.compilation_config.max_cudagraph_capture_size ): - num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens) + num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens_dp_padded) cudagraph_runtime_mode = CUDAGraphMode.PIECEWISE else: - num_input_tokens = num_tokens + num_input_tokens = num_tokens_dp_padded + if num_tokens_across_dp is not None: + num_tokens_across_dp[self.dp_rank] = num_input_tokens + # copy inputs to buffer for cudagraph self._set_positions(num_tokens, target_positions) self.hidden_states[:num_tokens] = target_hidden_states @@ -303,6 +314,7 @@ class EagleProposer: per_layer_attn_metadata, self.vllm_config, num_tokens=num_input_tokens, + num_tokens_across_dp=num_tokens_across_dp, cudagraph_runtime_mode=cudagraph_runtime_mode, ): ret_hidden_states = self.model( @@ -365,15 +377,23 @@ class EagleProposer: # Generate the remaining draft tokens. draft_token_ids_list = [draft_token_ids] + batch_size_dp_padded, batch_size_across_dp = self._pad_batch_across_dp( + num_tokens_unpadded=batch_size, + num_tokens_padded=batch_size, + ) + if ( self.use_cuda_graph - and batch_size <= self.compilation_config.max_cudagraph_capture_size + and batch_size_dp_padded + <= self.compilation_config.max_cudagraph_capture_size ): - input_batch_size = self.vllm_config.pad_for_cudagraph(batch_size) + input_batch_size = self.vllm_config.pad_for_cudagraph(batch_size_dp_padded) cudagraph_runtime_mode = CUDAGraphMode.PIECEWISE else: - input_batch_size = batch_size + input_batch_size = batch_size_dp_padded cudagraph_runtime_mode = CUDAGraphMode.NONE + if batch_size_across_dp is not None: + batch_size_across_dp[self.dp_rank] = input_batch_size common_attn_metadata.num_actual_tokens = batch_size common_attn_metadata.max_query_len = 1 @@ -474,6 +494,7 @@ class EagleProposer: per_layer_attn_metadata, self.vllm_config, num_tokens=input_batch_size, + num_tokens_across_dp=batch_size_across_dp, cudagraph_runtime_mode=cudagraph_runtime_mode, ): ret_hidden_states = self.model( @@ -1116,36 +1137,56 @@ class EagleProposer: self, num_tokens: int, use_cudagraphs=True, + is_graph_capturing=False, ) -> None: # Determine if CUDA graphs should be used for this run. cudagraphs_enabled = use_cudagraphs and self.use_cuda_graph - if ( - cudagraphs_enabled - and num_tokens <= self.compilation_config.max_cudagraph_capture_size - ): - num_tokens = self.vllm_config.pad_for_cudagraph(num_tokens) - with set_forward_context( - None, - self.vllm_config, - num_tokens=num_tokens, - cudagraph_runtime_mode=( - CUDAGraphMode.PIECEWISE if cudagraphs_enabled else CUDAGraphMode.NONE - ), + # FIXME: when using tree-based specdec, adjust number of forward-passes + # according to the depth of the tree. 
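Before the per-step loop that follows, it helps to picture what `_pad_batch_across_dp` coordinates: each DP rank publishes its token count, and when padding is allowed every rank runs the same padded size so the collectives inside the draft model line up. A toy, single-process approximation (illustration only; the real logic lives in `coordinate_batch_across_dp` and also handles micro-batching and CUDA-graph sizes):

```python
def pad_batch_across_dp(tokens_per_rank: list[int], allow_dp_padding: bool) -> list[int]:
    # Each rank pads its batch up to the largest batch across DP ranks,
    # so all ranks enter the forward pass with equal sizes.
    if not allow_dp_padding:
        return tokens_per_rank
    return [max(tokens_per_rank)] * len(tokens_per_rank)

# Rank 1 has the biggest batch, so ranks 0 and 2 pad up to 12 tokens.
assert pad_batch_across_dp([7, 12, 3], allow_dp_padding=True) == [12, 12, 12]
assert pad_batch_across_dp([7, 12, 3], allow_dp_padding=False) == [7, 12, 3]
```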
+ for fwd_idx in range( + self.num_speculative_tokens if not is_graph_capturing else 1 ): - if self.supports_mm_inputs: - input_ids = None - inputs_embeds = self.inputs_embeds[:num_tokens] - else: - input_ids = self.input_ids[:num_tokens] - inputs_embeds = None + if fwd_idx <= 1: + num_tokens_dp_padded, num_tokens_across_dp = self._pad_batch_across_dp( + num_tokens_unpadded=num_tokens, + num_tokens_padded=num_tokens, + ) + if ( + cudagraphs_enabled + and num_tokens_dp_padded + <= self.compilation_config.max_cudagraph_capture_size + ): + num_input_tokens = self.vllm_config.pad_for_cudagraph( + num_tokens_dp_padded + ) + else: + num_input_tokens = num_tokens_dp_padded + if num_tokens_across_dp is not None: + num_tokens_across_dp[self.dp_rank] = num_input_tokens - self.model( - input_ids=input_ids, - positions=self._get_positions(num_tokens), - hidden_states=self.hidden_states[:num_tokens], - inputs_embeds=inputs_embeds, - ) + with set_forward_context( + None, + self.vllm_config, + num_tokens=num_input_tokens, + num_tokens_across_dp=num_tokens_across_dp, + cudagraph_runtime_mode=CUDAGraphMode.PIECEWISE + if cudagraphs_enabled + else CUDAGraphMode.NONE, + ): + if self.supports_mm_inputs: + input_ids = None + inputs_embeds = self.inputs_embeds[:num_input_tokens] + else: + input_ids = self.input_ids[:num_input_tokens] + inputs_embeds = None + + self.model( + input_ids=input_ids, + positions=self._get_positions(num_input_tokens), + hidden_states=self.hidden_states[:num_input_tokens], + inputs_embeds=inputs_embeds, + ) def _get_attention_metadata_builder(self) -> AttentionMetadataBuilder: """Find and return the attention metadata builders for EAGLE layers. @@ -1211,6 +1252,28 @@ class EagleProposer: == 1 ), "All eagle layers should belong to the same kv cache group" + def _pad_batch_across_dp( + self, + num_tokens_unpadded: int, + num_tokens_padded: int, + ) -> tuple[int, torch.Tensor]: + # TODO(Flechman): support DBO ubatching + ubatch_slices, num_toks_across_dp = coordinate_batch_across_dp( + num_tokens_unpadded=num_tokens_unpadded, + parallel_config=self.vllm_config.parallel_config, + allow_microbatching=False, + allow_dp_padding=self.use_cuda_graph, + num_tokens_padded=num_tokens_padded, + uniform_decode=None, + num_scheduled_tokens_per_request=None, + ) + assert ubatch_slices is None, "DBO ubatching not implemented for EAGLE" + + num_tokens_dp_padded = num_tokens_padded + if num_toks_across_dp is not None: + num_tokens_dp_padded = int(num_toks_across_dp[self.dp_rank].item()) + return num_tokens_dp_padded, num_toks_across_dp + # NOTE(woosuk): Currently, the below code is not used and we always use argmax # to sample the draft tokens. 
We will use this after we find a way to manage diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 6413be66b141c..74fd2a1e2a2c0 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -3746,6 +3746,7 @@ class GPUModelRunner( create_mixed_batch: bool = False, remove_lora: bool = True, activate_lora: bool = False, + is_graph_capturing: bool = False, ) -> tuple[torch.Tensor, torch.Tensor]: """ Run a dummy forward pass to warm up/profile run or capture the @@ -3981,7 +3982,7 @@ class GPUModelRunner( if self.speculative_config and self.speculative_config.use_eagle(): assert isinstance(self.drafter, EagleProposer) use_cudagraphs = ( - cudagraph_runtime_mode == CUDAGraphMode.PIECEWISE + cudagraph_runtime_mode.has_mode(CUDAGraphMode.PIECEWISE) and not self.speculative_config.enforce_eager ) @@ -3995,6 +3996,7 @@ class GPUModelRunner( self.drafter.dummy_run( num_tokens, use_cudagraphs=use_cudagraphs, + is_graph_capturing=is_graph_capturing, ) # This is necessary to avoid blocking DP. @@ -4427,6 +4429,7 @@ class GPUModelRunner( skip_eplb=True, remove_lora=False, activate_lora=activate_lora, + is_graph_capturing=True, ) self.maybe_remove_all_loras(self.lora_config) From ef1f7030f016cc811236517e02fa51ee8876cc31 Mon Sep 17 00:00:00 2001 From: Micah Williamson Date: Tue, 25 Nov 2025 01:55:09 -0600 Subject: [PATCH 40/43] [ROCm][CI] Fix test_cudagraph_mode failure in AMD CI (#29367) Signed-off-by: Micah Williamson --- tests/v1/attention/utils.py | 7 +++ tests/v1/cudagraph/test_cudagraph_mode.py | 62 +++++++++++++++-------- vllm/platforms/rocm.py | 4 +- 3 files changed, 51 insertions(+), 22 deletions(-) diff --git a/tests/v1/attention/utils.py b/tests/v1/attention/utils.py index dea89babd4b47..df3d53332c7cd 100644 --- a/tests/v1/attention/utils.py +++ b/tests/v1/attention/utils.py @@ -340,4 +340,11 @@ full_cg_backend_configs = { "cudagraph_mode": "FULL_AND_PIECEWISE", }, ), + "RocmAttn": BackendConfig( + name="RocmAttn", + env_vars={"VLLM_V1_USE_PREFILL_DECODE_ATTENTION": "1"}, + comp_config={ + "cudagraph_mode": "FULL", + }, + ), } diff --git a/tests/v1/cudagraph/test_cudagraph_mode.py b/tests/v1/cudagraph/test_cudagraph_mode.py index d6bde16eba36b..7f9c2a0571c3c 100644 --- a/tests/v1/cudagraph/test_cudagraph_mode.py +++ b/tests/v1/cudagraph/test_cudagraph_mode.py @@ -35,14 +35,22 @@ def temporary_environ(env_vars): # test attention backend and cudagraph_mode combo # (backend_name, cudagraph_mode, supported) -combo_cases_1 = [ - ("FA3", "FULL", True), - ("FA3", "FULL_AND_PIECEWISE", True), - ("FA2", "FULL", True), # Should fallback to FULL_AND_PIECEWISE - ("FA2", "FULL_AND_PIECEWISE", True), - ("FlashInfer", "FULL", True), # Should fallback to FULL_AND_PIECEWISE - ("FlashInfer", "FULL_AND_PIECEWISE", True), -] +if current_platform.is_rocm(): + combo_cases_1 = [ + ("RocmAttn", "FULL", True), + ("RocmAttn", "FULL_AND_PIECEWISE", True), + ("TritonAttn", "FULL", True), + ("TritonAttn", "FULL_AND_PIECEWISE", True), + ] +else: + combo_cases_1 = [ + ("FA3", "FULL", True), + ("FA3", "FULL_AND_PIECEWISE", True), + ("FA2", "FULL", True), # Should fallback to FULL_AND_PIECEWISE + ("FA2", "FULL_AND_PIECEWISE", True), + ("FlashInfer", "FULL", True), # Should fallback to FULL_AND_PIECEWISE + ("FlashInfer", "FULL_AND_PIECEWISE", True), + ] @pytest.mark.parametrize("backend_name, cudagraph_mode, supported", combo_cases_1) @@ -92,18 +100,32 @@ def test_backend_and_cudagraph_mode_combo(backend_name, cudagraph_mode, supporte # test cudagraph_mode 
with different compilation mode. # (backend_name, cudagraph_mode, compilation_mode, supported) -combo_cases_2 = [ - ("FA2", "FULL", CompilationMode.NONE, True), - ("FA2", "FULL", CompilationMode.VLLM_COMPILE, True), - ("FA2", "PIECEWISE", CompilationMode.NONE, False), - ("FA2", "PIECEWISE", CompilationMode.VLLM_COMPILE, True), - ("FA2", "FULL_AND_PIECEWISE", CompilationMode.NONE, False), - ("FA2", "FULL_AND_PIECEWISE", CompilationMode.VLLM_COMPILE, True), - ("FA2", "FULL_DECODE_ONLY", CompilationMode.NONE, True), - ("FA2", "FULL_DECODE_ONLY", CompilationMode.VLLM_COMPILE, True), - ("FA2", "NONE", CompilationMode.NONE, True), - ("FA2", "NONE", CompilationMode.VLLM_COMPILE, True), -] +if current_platform.is_rocm(): + combo_cases_2 = [ + ("RocmAttn", "FULL", CompilationMode.NONE, True), + ("RocmAttn", "FULL", CompilationMode.VLLM_COMPILE, True), + ("RocmAttn", "PIECEWISE", CompilationMode.NONE, False), + ("RocmAttn", "PIECEWISE", CompilationMode.VLLM_COMPILE, True), + ("RocmAttn", "FULL_AND_PIECEWISE", CompilationMode.NONE, False), + ("RocmAttn", "FULL_AND_PIECEWISE", CompilationMode.VLLM_COMPILE, True), + ("RocmAttn", "FULL_DECODE_ONLY", CompilationMode.NONE, True), + ("RocmAttn", "FULL_DECODE_ONLY", CompilationMode.VLLM_COMPILE, True), + ("RocmAttn", "NONE", CompilationMode.NONE, True), + ("RocmAttn", "NONE", CompilationMode.VLLM_COMPILE, True), + ] +else: + combo_cases_2 = [ + ("FA2", "FULL", CompilationMode.NONE, True), + ("FA2", "FULL", CompilationMode.VLLM_COMPILE, True), + ("FA2", "PIECEWISE", CompilationMode.NONE, False), + ("FA2", "PIECEWISE", CompilationMode.VLLM_COMPILE, True), + ("FA2", "FULL_AND_PIECEWISE", CompilationMode.NONE, False), + ("FA2", "FULL_AND_PIECEWISE", CompilationMode.VLLM_COMPILE, True), + ("FA2", "FULL_DECODE_ONLY", CompilationMode.NONE, True), + ("FA2", "FULL_DECODE_ONLY", CompilationMode.VLLM_COMPILE, True), + ("FA2", "NONE", CompilationMode.NONE, True), + ("FA2", "NONE", CompilationMode.VLLM_COMPILE, True), + ] @pytest.mark.parametrize( diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index b0434b9642f07..0483f6c06ada8 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -321,8 +321,8 @@ class RocmPlatform(Platform): return AttentionBackendEnum.TRITON_ATTN.get_path() raise RuntimeError( - "V0 attention backends have been removed. Set VLLM_USE_V1=1 " - "to select a supported backend." + f"Attention backend {selected_backend.name} is not supported on " + "ROCm. Note that V0 attention backends have been removed." 
) @classmethod From 6330f9477db214477004df6546f86e3f14f8eab9 Mon Sep 17 00:00:00 2001 From: elvischenv <219235043+elvischenv@users.noreply.github.com> Date: Tue, 25 Nov 2025 15:59:40 +0800 Subject: [PATCH 41/43] [Bugfix] Fix GPT-OSS AR+NORM fusion (#28841) Signed-off-by: elvischenv <219235043+elvischenv@users.noreply.github.com> --- .buildkite/test-pipeline.yaml | 1 + tests/compile/distributed/test_fusions_e2e.py | 11 +++++++++++ .../device_communicators/symm_mem.py | 2 +- vllm/model_executor/layers/fused_moe/layer.py | 17 +++++++++++------ 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index e88e693a2dda5..e444becd9867b 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -971,6 +971,7 @@ steps: - vllm/model_executor/layers/layernorm.py - vllm/model_executor/layers/activation.py - vllm/model_executor/layers/quantization/input_quant_fp8.py + - vllm/model_executor/layers/fused_moe/layer.py - tests/compile/test_fusion_attn.py - tests/compile/test_silu_mul_quant_fusion.py - tests/compile/distributed/test_fusion_all_reduce.py diff --git a/tests/compile/distributed/test_fusions_e2e.py b/tests/compile/distributed/test_fusions_e2e.py index 661172e1965b5..53c3f875d2003 100644 --- a/tests/compile/distributed/test_fusions_e2e.py +++ b/tests/compile/distributed/test_fusions_e2e.py @@ -111,6 +111,17 @@ if current_platform.is_cuda(): async_tp=96, # MLP is MoE, half the fusions of dense ), ), + ModelBackendTestCase( + model_name="openai/gpt-oss-20b", + model_kwargs=dict(max_model_len=1024, kv_cache_dtype="fp8"), + backend=AttentionBackendEnum.FLASHINFER, + matches=Matches( + attention_fusion=0, + allreduce_fusion=49, + sequence_parallel=49, + async_tp=48, + ), + ), ] elif current_platform.is_rocm(): diff --git a/vllm/distributed/device_communicators/symm_mem.py b/vllm/distributed/device_communicators/symm_mem.py index eb1f173b11925..7a049b003cf73 100644 --- a/vllm/distributed/device_communicators/symm_mem.py +++ b/vllm/distributed/device_communicators/symm_mem.py @@ -131,7 +131,7 @@ class SymmMemCommunicator: return None if out is None: out = torch.empty_like(inp) - self.buffer[: inp.numel()].copy_(inp.view(-1)) + self.buffer[: inp.numel()].copy_(inp.reshape(-1)) # Determine which algorithm to use use_multimem = False diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 0ef3130b26333..bb30f1292a5fa 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -1690,6 +1690,10 @@ class FusedMoE(CustomOp): ) def reduce_output(states: torch.Tensor) -> torch.Tensor: + # Slice before all_reduce to enable possible fusion + if self.hidden_size != og_hidden_states: + states = states[..., :og_hidden_states] + if ( not self.is_sequence_parallel and not self.use_dp_chunking @@ -1712,11 +1716,12 @@ class FusedMoE(CustomOp): if self.zero_expert_num is not None and self.zero_expert_num > 0: assert isinstance(fused_output, tuple) fused_output, zero_expert_result = fused_output - return (reduce_output(fused_output) + zero_expert_result)[ - ..., :og_hidden_states - ] + return ( + reduce_output(fused_output) + + zero_expert_result[..., :og_hidden_states] + ) else: - return reduce_output(fused_output)[..., :og_hidden_states] + return reduce_output(fused_output) else: if current_platform.is_tpu(): # TODO: Once the OOM issue for the TPU backend is resolved, we @@ -1729,8 +1734,8 @@ class FusedMoE(CustomOp): 
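The one-word `view(-1)` to `reshape(-1)` change in `symm_mem.py` above dovetails with the slice-before-all-reduce reordering in `layer.py`: slicing the hidden states can yield a non-contiguous tensor, and `view()` raises on such inputs while `reshape()` falls back to a copy. A quick standalone demonstration:

```python
import torch

x = torch.arange(12).reshape(3, 4)[:, :3]  # non-contiguous column slice
try:
    x.view(-1)
except RuntimeError:
    print("view(-1) fails on the non-contiguous slice")
flat = x.reshape(-1)  # copies as needed and succeeds
assert flat.tolist() == [0, 1, 2, 4, 5, 6, 8, 9, 10]
```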
hidden_states, router_logits, self.layer_name ) return ( - reduce_output(shared_output)[..., :og_hidden_states], - reduce_output(fused_output)[..., :og_hidden_states], + reduce_output(shared_output), + reduce_output(fused_output), ) def forward_cuda( From 67fc16cd8cf778a30ad0f7619fe77bd85f1d1633 Mon Sep 17 00:00:00 2001 From: "wang.yuqi" Date: Tue, 25 Nov 2025 16:06:09 +0800 Subject: [PATCH 42/43] [Bugfix] If chunked_prefill is disabled, end the scheduling early. (#28911) Signed-off-by: wang.yuqi --- tests/v1/core/test_scheduler.py | 28 ++++++++++++++++++++++++++++ tests/v1/core/utils.py | 3 ++- vllm/v1/core/sched/scheduler.py | 6 +++--- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/tests/v1/core/test_scheduler.py b/tests/v1/core/test_scheduler.py index 09acde6e08faa..fe4153e609971 100644 --- a/tests/v1/core/test_scheduler.py +++ b/tests/v1/core/test_scheduler.py @@ -641,6 +641,34 @@ def test_schedule_concurrent_batches( scheduler.update_from_output(scheduler_output1, model_runner_output) +@pytest.mark.parametrize("enable_chunked_prefill", [True, False]) +def test_schedule_order(enable_chunked_prefill: bool): + scheduler = create_scheduler( + max_num_batched_tokens=1024, + max_num_seqs=3, + enable_chunked_prefill=enable_chunked_prefill, + ) + + # long requests + requests = create_requests(num_requests=2, num_tokens=800) + # short requests + requests += create_requests(num_requests=2, num_tokens=10) + + for request in requests: + scheduler.add_request(request) + + scheduler_output1 = scheduler.schedule() + + if enable_chunked_prefill: + # When enable chunked prefill, long requests will be chunked. + assert len(scheduler_output1.scheduled_new_reqs) == 2 + else: + # When disable chunked prefill, should not skip the long requests, + # and scheduling subsequent short requests in advance, + # even though there is still token budgets remaining. + assert len(scheduler_output1.scheduled_new_reqs) == 1 + + def test_preempt_during_execution(): # NOTE(woosuk): The actual number of available blocks is 10 instead of 11 # because block 0 is reserved as the null block. diff --git a/tests/v1/core/utils.py b/tests/v1/core/utils.py index 6830f68736453..7537c7a60476b 100644 --- a/tests/v1/core/utils.py +++ b/tests/v1/core/utils.py @@ -42,6 +42,7 @@ def create_scheduler( model: str = "facebook/opt-125m", max_num_seqs: int = 16, max_num_batched_tokens: int = 8192, + enable_chunked_prefill: bool = True, enable_prefix_caching: bool = False, long_prefill_token_threshold: int = 0, disable_chunked_mm_input: bool = False, @@ -76,7 +77,7 @@ def create_scheduler( max_model_len=max_model_len, long_prefill_token_threshold=long_prefill_token_threshold, disable_chunked_mm_input=disable_chunked_mm_input, - enable_chunked_prefill=True, + enable_chunked_prefill=enable_chunked_prefill, async_scheduling=async_scheduling, ) model_config = ModelConfig( diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index a7ec0de372631..23af014c10364 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -508,9 +508,9 @@ class Scheduler(SchedulerInterface): not self.scheduler_config.enable_chunked_prefill and num_new_tokens > token_budget ): - self.waiting.pop_request() - skipped_waiting_requests.prepend_request(request) - continue + # If chunked_prefill is disabled, + # we can stop the scheduling here. 
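To see why the early `break` added just below preserves arrival order, consider a toy version of the waiting-queue loop (illustration only, not vLLM's scheduler). With chunked prefill on, an oversized request is trimmed to the remaining budget; with it off, the scheduler must stop at the first request that does not fit rather than skip past it, or later short requests would jump the queue:

```python
def schedule(waiting: list[int], token_budget: int, chunked_prefill: bool) -> list[int]:
    scheduled = []
    for num_new_tokens in waiting:
        if num_new_tokens > token_budget:
            if not chunked_prefill:
                break  # stop scheduling entirely; keep FCFS order
            num_new_tokens = token_budget  # chunk the long request
        if num_new_tokens == 0:
            break
        scheduled.append(num_new_tokens)
        token_budget -= num_new_tokens
    return scheduled

# Mirrors test_schedule_order: two 800-token and two 10-token requests,
# budget 1024 -> two requests start with chunking, only one without.
assert schedule([800, 800, 10, 10], 1024, chunked_prefill=True) == [800, 224]
assert schedule([800, 800, 10, 10], 1024, chunked_prefill=False) == [800]
```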
+ break num_new_tokens = min(num_new_tokens, token_budget) assert num_new_tokens > 0 From db2906108acdc141e8a21e390228c69b1379e3c2 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Tue, 25 Nov 2025 00:30:11 -0800 Subject: [PATCH 43/43] [Misc] Streamline unique id generation (#29375) Signed-off-by: Nick Hill --- vllm/entrypoints/openai/protocol.py | 16 ++++++++-------- vllm/entrypoints/openai/serving_engine.py | 9 +++++---- vllm/utils/__init__.py | 4 +++- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 5a0a05f9af323..c4023a6185289 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -661,7 +661,7 @@ class ChatCompletionRequest(OpenAIBaseModel): ), ) request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -1078,7 +1078,7 @@ class CompletionRequest(OpenAIBaseModel): ), ) request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -1375,7 +1375,7 @@ class EmbeddingCompletionRequest(OpenAIBaseModel): ), ) request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -1470,7 +1470,7 @@ class EmbeddingChatRequest(OpenAIBaseModel): ), ) request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -1892,7 +1892,7 @@ class ClassificationCompletionRequest(OpenAIBaseModel): ), ) request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -1983,7 +1983,7 @@ class ClassificationChatRequest(OpenAIBaseModel): ) request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -3094,7 +3094,7 @@ class TranslationResponseVerbose(OpenAIBaseModel): ####### Tokens IN <> Tokens OUT ####### class GenerateRequest(BaseModel): request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " @@ -3151,7 +3151,7 @@ class GenerateResponseChoice(BaseModel): class GenerateResponse(BaseModel): request_id: str = Field( - default_factory=lambda: f"{random_uuid()}", + default_factory=random_uuid, description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. 
This id is used " diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index de22c48809dc8..09a135b701d05 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -1349,11 +1349,12 @@ class OpenAIServing: raw_request: Request | None, default: str | None = None ) -> str | None: """Pulls the request id to use from a header, if provided""" - default = default or random_uuid() - if raw_request is None: - return default + if raw_request is not None and ( + (req_id := raw_request.headers.get("X-Request-Id")) is not None + ): + return req_id - return raw_request.headers.get("X-Request-Id", default) + return random_uuid() if default is None else default @staticmethod def _get_data_parallel_rank(raw_request: Request | None) -> int | None: diff --git a/vllm/utils/__init__.py b/vllm/utils/__init__.py index d94da71b289f3..fddcc27204307 100644 --- a/vllm/utils/__init__.py +++ b/vllm/utils/__init__.py @@ -52,9 +52,11 @@ STR_FLASHINFER_ATTN_VAL: str = "FLASHINFER" STR_FLASH_ATTN_VAL: str = "FLASH_ATTN" STR_INVALID_VAL: str = "INVALID" +MASK_64_BITS = (1 << 64) - 1 + def random_uuid() -> str: - return str(uuid.uuid4().hex) + return f"{uuid.uuid4().int & MASK_64_BITS:016x}" # 16 hex chars def length_from_prompt_token_ids_or_embeds(
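A quick sanity check of the new id format: `random_uuid()` now masks the 128-bit UUID down to its low 64 bits and renders it as fixed-width hex, so ids shrink from 32 to 16 characters while remaining effectively unique for request-scoped use:

```python
import uuid

MASK_64_BITS = (1 << 64) - 1

def random_uuid() -> str:
    return f"{uuid.uuid4().int & MASK_64_BITS:016x}"  # 16 hex chars

rid = random_uuid()
assert len(rid) == 16 and all(c in "0123456789abcdef" for c in rid)
```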