diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py
index d239d645edc1..c95192a5a1bc 100644
--- a/vllm/distributed/device_communicators/custom_all_reduce.py
+++ b/vllm/distributed/device_communicators/custom_all_reduce.py
@@ -28,6 +28,10 @@ def _can_p2p(rank: int, world_size: int) -> bool:
     for i in range(world_size):
         if i == rank:
             continue
+        if envs.VLLM_SKIP_P2P_CHECK:
+            logger.info(
+                "Skipping P2P check and trusting the driver's P2P report.")
+            return torch.cuda.can_device_access_peer(rank, i)
         if not gpu_p2p_access_check(rank, i):
             return False
     return True
diff --git a/vllm/envs.py b/vllm/envs.py
index 705d858e71a6..7cbffc83a625 100644
--- a/vllm/envs.py
+++ b/vllm/envs.py
@@ -63,6 +63,7 @@
     VLLM_USE_TRITON_AWQ: bool = False
     VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False
     VLLM_ALLOW_DEPRECATED_BEAM_SEARCH: bool = False
+    VLLM_SKIP_P2P_CHECK: bool = False
 
 
 def get_default_cache_root():
@@ -423,6 +424,13 @@ def get_default_config_root():
     lambda:
     (os.environ.get("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "0").strip().lower() in
      ("1", "true")),
+
+    # By default, vLLM will check the peer-to-peer capability itself,
+    # in case of broken drivers. See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
+    # If this env var is set to 1, vLLM will skip the peer-to-peer check,
+    # and trust the driver's peer-to-peer capability report.
+    "VLLM_SKIP_P2P_CHECK":
+    lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "0") == "1",
 }
 
 # end-env-vars-definition
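
Below is a minimal, hypothetical sketch (not part of the diff) of what the new flag changes: when `VLLM_SKIP_P2P_CHECK=1`, `_can_p2p` no longer runs vLLM's own peer-to-peer probe (`gpu_p2p_access_check`) and instead returns the CUDA driver's report via `torch.cuda.can_device_access_peer` for the first peer it inspects. The helper name `driver_reports_p2p` and the two-GPU assumption are illustrative only.

```python
# Hypothetical standalone sketch; the helper name and the 2-GPU assumption
# are illustrative and not part of vLLM's API.
import os

# Opt in the same way envs.py parses the variable: only the exact string "1"
# enables the skip; anything else keeps vLLM's own probe.
os.environ["VLLM_SKIP_P2P_CHECK"] = "1"

import torch


def driver_reports_p2p(rank: int, world_size: int) -> bool:
    """Mirrors the patched _can_p2p when the flag is set: trust the driver
    and return its answer for the first non-self peer, instead of running
    vLLM's own gpu_p2p_access_check probe for every peer."""
    for i in range(world_size):
        if i == rank:
            continue
        return torch.cuda.can_device_access_peer(rank, i)
    return True  # single-GPU case: no peers to check


if __name__ == "__main__":
    if torch.cuda.device_count() >= 2:
        print("Driver P2P report for rank 0:", driver_reports_p2p(0, 2))
```

Note that the parsing in `envs.py` is strict (`os.getenv("VLLM_SKIP_P2P_CHECK", "0") == "1"`), and that with the flag set the loop returns on the first non-self peer, exactly as the diff is written.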