vllm.v1.executor.ray_executor

logger module-attribute

logger = init_logger(__name__)

RayDistributedExecutor

Bases: Executor

Ray-based distributed executor
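
In practice this executor is selected rather than constructed directly: asking vLLM to use the "ray" distributed executor backend routes execution through this class. A minimal sketch, assuming the user-facing vllm.LLM entry point and its distributed_executor_backend / tensor_parallel_size arguments (defined outside this module):

from vllm import LLM

# Illustrative model and parallel size; distributed_executor_backend="ray"
# makes the engine use RayDistributedExecutor under the hood.
llm = LLM(
    model="facebook/opt-125m",
    tensor_parallel_size=2,
    distributed_executor_backend="ray",
)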

Source code in vllm/v1/executor/ray_executor.py
class RayDistributedExecutor(Executor):
    """Ray-based distributed executor"""

    # These env vars are worker-specific, therefore are NOT copied
    # from the driver to the workers
    WORKER_SPECIFIC_ENV_VARS = {
        "VLLM_HOST_IP",
        "VLLM_HOST_PORT",
        "LOCAL_RANK",
        "CUDA_VISIBLE_DEVICES",
    }

    # These non-vLLM env vars are copied from the driver to workers
    ADDITIONAL_ENV_VARS = {"HF_TOKEN", "HUGGING_FACE_HUB_TOKEN"}

    uses_ray: bool = True
    supports_pp: bool = True

    def _init_executor(self) -> None:
        self.forward_dag: ray.dag.CompiledDAG | None = None

        # For TPU or XPU, avoid compiling NVIDIA's NCCL
        if current_platform.is_tpu() or current_platform.is_xpu():
            os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = "shm"

        assert self.uses_ray
        initialize_ray_cluster(self.parallel_config)
        placement_group = self.parallel_config.placement_group

        # Disable Ray usage stats collection.
        ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0")
        if ray_usage != "1":
            os.environ["RAY_USAGE_STATS_ENABLED"] = "0"

        # Create the parallel GPU workers.
        self._init_workers_ray(placement_group)

        # KV connector setup
        self.has_connector = self.vllm_config.kv_transfer_config is not None

    @property
    def max_concurrent_batches(self) -> int:
        """Ray distributed executor supports pipeline parallelism,
        meaning that it allows PP size batches to be executed concurrently.
        """
        if self.scheduler_config.async_scheduling:
            return 2
        return self.parallel_config.pipeline_parallel_size

    def shutdown(self) -> None:
        if logger:
            # Somehow logger can be None here.
            logger.info(
                "Shutting down Ray distributed executor. If you see error log "
                "from logging.cc regarding SIGTERM received, please ignore "
                "because this is the expected termination process in Ray."
            )
        if hasattr(self, "forward_dag") and self.forward_dag is not None:
            self.forward_dag.teardown()
            import ray

            for worker in self.workers:
                ray.kill(worker)
            self.forward_dag = None

    def _configure_ray_workers_use_nsight(self, ray_remote_kwargs) -> dict[str, Any]:
        # If nsight profiling is enabled, we need to set the profiling
        # configuration for the ray workers as runtime env.
        runtime_env = ray_remote_kwargs.setdefault("runtime_env", {})
        runtime_env.update(
            {
                "nsight": {
                    "t": "cuda,cudnn,cublas",
                    "o": "'worker_process_%p'",
                    "cuda-graph-trace": "node",
                }
            }
        )

        return ray_remote_kwargs

    # A child class could override this to return the actual env vars.
    def _get_env_vars_to_be_updated(self):
        return self._env_vars_for_all_workers

    def _init_workers_ray(self, placement_group: "PlacementGroup", **ray_remote_kwargs):
        num_gpus = envs.VLLM_RAY_PER_WORKER_GPUS

        # The driver dummy worker does not actually use any resources.
        # It holds the resource for the driver worker.
        self.driver_dummy_worker: RayWorkerWrapper | None = None
        # The remaining workers are the actual ray actors.
        self.workers: list[RayWorkerWrapper] = []

        # Used in ray compiled DAG: indexed first by PP rank,
        # and then TP rank. In other words, the inner list is
        # the TP group of workers for a PP rank.
        self.pp_tp_workers: list[list[RayWorkerWrapper]] = []

        if self.parallel_config.ray_workers_use_nsight:
            ray_remote_kwargs = self._configure_ray_workers_use_nsight(
                ray_remote_kwargs
            )

        # Create the workers.
        bundle_indices: list[int]
        if envs.VLLM_RAY_BUNDLE_INDICES:
            # Use the bundle indices specified by the user.
            bundle_indices = list(map(int, envs.VLLM_RAY_BUNDLE_INDICES.split(",")))
            assert len(bundle_indices) == self.parallel_config.world_size, (
                "VLLM_RAY_BUNDLE_INDICES must have the same size"
                f" as the world size, but got {bundle_indices=} "
                f"and {self.parallel_config.world_size=}"
            )
            assert len(set(bundle_indices)) == len(bundle_indices), (
                "VLLM_RAY_BUNDLE_INDICES cannot have duplicate values,"
                f" but got {bundle_indices=}"
            )
        else:
            # use the first N bundles that have GPU resources.
            bundle_indices = []
            for bundle_id, bundle in enumerate(placement_group.bundle_specs):
                if bundle.get(current_platform.ray_device_key, 0):
                    bundle_indices.append(bundle_id)
            bundle_indices = bundle_indices[: self.parallel_config.world_size]

        worker_metadata: list[RayWorkerMetaData] = []
        driver_ip = get_ip()
        for rank, bundle_id in enumerate(bundle_indices):
            scheduling_strategy = PlacementGroupSchedulingStrategy(
                placement_group=placement_group,
                placement_group_capture_child_tasks=True,
                placement_group_bundle_index=bundle_id,
            )

            if current_platform.ray_device_key == "GPU":
                # NV+AMD GPUs, and Intel XPUs
                worker = ray.remote(
                    num_cpus=0,
                    num_gpus=num_gpus,
                    scheduling_strategy=scheduling_strategy,
                    **ray_remote_kwargs,
                )(RayWorkerWrapper).remote(  # type: ignore[attr-defined]
                    vllm_config=self.vllm_config, rpc_rank=rank
                )
            else:
                worker = ray.remote(
                    num_cpus=0,
                    num_gpus=0,
                    resources={current_platform.ray_device_key: num_gpus},
                    scheduling_strategy=scheduling_strategy,
                    **ray_remote_kwargs,
                )(RayWorkerWrapper).remote(  # type: ignore[attr-defined]
                    vllm_config=self.vllm_config, rpc_rank=rank
                )
            worker_metadata.append(RayWorkerMetaData(worker=worker, created_rank=rank))

        worker_ips = ray.get(
            [
                each.worker.get_node_ip.remote()  # type: ignore[attr-defined]
                for each in worker_metadata
            ]
        )

        for each, ip in zip(worker_metadata, worker_ips):
            each.ip = ip

        logger.debug("workers: %s", worker_metadata)
        logger.debug("driver_dummy_worker: %s", self.driver_dummy_worker)

        ip_counts: dict[str, int] = {}
        for ip in worker_ips:
            ip_counts[ip] = ip_counts.get(ip, 0) + 1

        def sort_by_driver_then_worker_ip(item: RayWorkerMetaData):
            """
            Sort the workers based on 3 properties:
            1. If the worker is on the same node as the driver (vllm engine),
                it should be placed first.
            2. Then, if the worker is on a node with fewer workers, it should
                be placed first.
            3. Finally, if the worker is on a node with a smaller IP address,
                it should be placed first.
            """
            ip = item.ip
            return 0 if ip == driver_ip else 1, ip_counts[ip], ip

        # After sorting, the workers on the same node will be
        # close to each other, and the workers on the driver
        # node will be placed first.
        sorted_worker_metadata = sorted(
            worker_metadata, key=sort_by_driver_then_worker_ip
        )
        for i, item in enumerate(sorted_worker_metadata):
            item.adjusted_rank = i
        self.workers = [item.worker for item in sorted_worker_metadata]
        rerank_mapping = {
            item.created_rank: item.adjusted_rank for item in sorted_worker_metadata
        }
        self.collective_rpc("adjust_rank", args=(rerank_mapping,))

        # Get the set of GPU IDs used on each node.
        worker_node_and_gpu_ids = []
        for worker in [self.driver_dummy_worker] + self.workers:
            if worker is None:
                # driver_dummy_worker can be None when using ray spmd worker.
                continue
            worker_node_and_gpu_ids.append(
                ray.get(worker.get_node_and_gpu_ids.remote())
            )  # type: ignore[attr-defined]

        node_workers = defaultdict(list)  # node id -> list of worker ranks
        node_gpus = defaultdict(list)  # node id -> list of gpu ids

        for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids):
            node_workers[node_id].append(i)
            # `gpu_ids` can be a list of strings or integers.
            # convert them to integers for consistency.
            # NOTE: gpu_ids can be larger than 9 (e.g. 16 GPUs),
            # string sorting is not sufficient.
            # see https://github.com/vllm-project/vllm/issues/5590
            gpu_ids = [int(x) for x in gpu_ids]
            node_gpus[node_id].extend(gpu_ids)
        for node_id, gpu_ids in node_gpus.items():
            node_gpus[node_id] = sorted(gpu_ids)

        all_ips = set(worker_ips + [driver_ip])
        n_ips = len(all_ips)
        n_nodes = len(node_workers)

        if n_nodes != n_ips:
            raise RuntimeError(
                f"Every node should have a unique IP address. Got {n_nodes}"
                f" nodes with node ids {list(node_workers.keys())} and "
                f"{n_ips} unique IP addresses {all_ips}. Please check your"
                " network configuration. If you set `VLLM_HOST_IP`"
                " environment variable, make sure it is unique for"
                " each node."
            )

        # Set environment variables for the driver and workers.
        all_args_to_update_environment_variables = [
            {
                current_platform.device_control_env_var: ",".join(
                    map(str, node_gpus[node_id])
                ),
            }
            for (node_id, _) in worker_node_and_gpu_ids
        ]

        # Environment variables to copy from driver to workers
        env_vars_to_copy = get_env_vars_to_copy(
            exclude_vars=self.WORKER_SPECIFIC_ENV_VARS,
            additional_vars=set(current_platform.additional_env_vars).union(
                self.ADDITIONAL_ENV_VARS
            ),
            destination="workers",
        )

        # Copy existing env vars to each worker's args
        for args in all_args_to_update_environment_variables:
            # TODO: refactor platform-specific env vars
            for name in env_vars_to_copy:
                if name in os.environ:
                    args[name] = os.environ[name]

        self._env_vars_for_all_workers = all_args_to_update_environment_variables

        self.collective_rpc(
            "update_environment_variables", args=(self._get_env_vars_to_be_updated(),)
        )

        if len(node_gpus) == 1:
            # in single node case, we don't need to get the IP address.
            # the loopback address is sufficient
            # NOTE: a node may have several IP addresses, one for each
            # network interface. `get_ip()` might return any of them,
            # while they might not work for communication inside the node
            # if the network setup is complicated. Using the loopback address
            # solves this issue, as it always works for communication inside
            # the node.
            driver_ip = "127.0.0.1"
        distributed_init_method = get_distributed_init_method(
            driver_ip, get_open_port()
        )

        # Initialize the actual workers inside worker wrapper.
        all_kwargs = []
        for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids):
            local_rank = node_workers[node_id].index(rank)
            kwargs = dict(
                vllm_config=self.vllm_config,
                local_rank=local_rank,
                rank=rank,
                distributed_init_method=distributed_init_method,
                is_driver_worker=(not self.parallel_config)
                or (rank % self.parallel_config.tensor_parallel_size == 0),
            )
            all_kwargs.append(kwargs)
        self.collective_rpc("init_worker", args=(all_kwargs,))

        self.collective_rpc("init_device")
        self.collective_rpc("load_model")

        for pp_rank in range(self.parallel_config.pipeline_parallel_size):
            self.pp_tp_workers.append([])
            for tp_rank in range(self.parallel_config.tensor_parallel_size):
                # PP=2, TP=4
                # pp_tp_workers = [[0, 1, 2, 3], [4, 5, 6, 7]]
                rank = (pp_rank * self.parallel_config.tensor_parallel_size) + tp_rank
                assert len(self.pp_tp_workers[pp_rank]) == tp_rank
                assert pp_rank < len(self.pp_tp_workers)
                self.pp_tp_workers[pp_rank].append(self.workers[rank])

    def reinitialize_distributed(
        self, reconfig_request: ReconfigureDistributedRequest
    ) -> None:
        self.collective_rpc("reinitialize_distributed", args=(reconfig_request,))
        if (
            reconfig_request.new_data_parallel_rank
            == ReconfigureRankType.SHUTDOWN_CURRENT_RANK
        ):
            self.shutdown()

    def execute_model(  # type: ignore[override]
        self, scheduler_output: SchedulerOutput, non_block: bool = False
    ) -> ModelRunnerOutput | Future[ModelRunnerOutput]:
        """Execute the model on the Ray workers.

        Args:
            scheduler_output: The scheduler output to execute.
            non_block: If True, the method will return a Future.

        Returns:
            The model runner output.
        """
        # Build the compiled DAG for the first time.
        if self.forward_dag is None:  # type: ignore
            self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)

        refs = self.forward_dag.execute(scheduler_output)  # type: ignore

        if not self.has_connector:
            # Get output only from a single worker (output_rank)
            # When PP is not used, we block here until the result is available.
            if not non_block:
                return refs[0].get()

            # When PP is used, we return a FutureWrapper immediately so that
            # the scheduler can yield to the next batch.
            return FutureWrapper(refs)

        # Get output from all workers when connector is present
        assert self.kv_output_aggregator is not None
        if not non_block:
            # Block and get results from all workers
            outputs = [ref.get() for ref in refs]
            return self.kv_output_aggregator.aggregate(outputs)

        # Return a future that will aggregate outputs from all workers
        return FutureWrapper(refs, self.kv_output_aggregator)

    def collective_rpc(
        self,
        method: str | Callable,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict[str, Any] | None = None,
        non_block: bool = False,
    ) -> list[Any]:
        """Runs the given method on all workers."""
        sent_method = method if isinstance(method, str) else cloudpickle.dumps(method)
        del method

        if kwargs is None:
            kwargs = {}
        ray_worker_outputs = [
            worker.execute_method.remote(  # type: ignore[attr-defined]
                sent_method, *args, **kwargs
            )
            for worker in self.workers
        ]

        # Get the results of the ray workers.
        if non_block:
            return [FutureWrapper((output,)) for output in ray_worker_outputs]

        return ray.get(ray_worker_outputs, timeout=timeout)

    def _check_ray_cgraph_installation(self):
        import importlib.metadata

        from packaging import version

        required_version = version.parse("2.43.0")
        current_version = version.parse(importlib.metadata.version("ray"))
        if current_version < required_version:
            raise ValueError(
                f"Ray version {required_version} is "
                f"required, but found {current_version}"
            )

        import importlib.util

        cgraph_spec = importlib.util.find_spec("ray.experimental.compiled_dag_ref")
        if cgraph_spec is None:
            raise ValueError(
                "Ray Compiled Graph is not installed. "
                "Run `pip install ray[cgraph]` to install it."
            )

        cupy_spec = importlib.util.find_spec("cupy")
        if cupy_spec is None and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE == "nccl":
            raise ValueError(
                "cupy is not installed but required since "
                "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE is set to 'nccl'. "
                "Run `pip install ray[cgraph]` and check cupy installation."
            )

    def _compiled_ray_dag(self, enable_asyncio: bool):
        assert self.parallel_config.use_ray
        self._check_ray_cgraph_installation()
        # Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300 seconds
        # (it is 10 seconds by default). This is a Ray environment variable to
        # control the timeout of getting result from a compiled graph execution,
        # i.e., the distributed execution that includes model forward runs and
        # intermediate tensor communications, in the case of vllm.
        # Note: we should set this env var before importing
        # ray.dag, otherwise it will not take effect.
        os.environ.setdefault("RAY_CGRAPH_get_timeout", "300")  # noqa: SIM112
        from ray.dag import InputNode, MultiOutputNode

        logger.info(
            "RAY_CGRAPH_get_timeout is set to %s",
            os.environ["RAY_CGRAPH_get_timeout"],  # noqa: SIM112
        )
        logger.info(
            "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE = %s",
            envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE,
        )
        logger.info(
            "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s",
            envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
        )

        channel_type = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
        if channel_type not in ("auto", "nccl", "shm"):
            raise ValueError(
                "Invalid value for VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: "
                f"{channel_type}. Valid values are: 'auto', 'nccl', or 'shm'."
            )

        with InputNode() as input_data:
            # Example DAG: PP=2, TP=4
            #
            # SchedulerOutput -> 0 -> (SchedulerOutput, IntermediateTensors) -> 4 -> ModelRunnerOutput   # noqa: E501
            # SchedulerOutput -> 1 -> (SchedulerOutput, IntermediateTensors) -> 5 -> ModelRunnerOutput   # noqa: E501
            # SchedulerOutput -> 2 -> (SchedulerOutput, IntermediateTensors) -> 6 -> ModelRunnerOutput   # noqa: E501
            # SchedulerOutput -> 3 -> (SchedulerOutput, IntermediateTensors) -> 7 -> ModelRunnerOutput   # noqa: E501

            # All workers in the first TP group will take in the
            # ExecuteModelRequest as input.
            outputs = [input_data for _ in self.pp_tp_workers[0]]
            for pp_rank, tp_group in enumerate(self.pp_tp_workers):
                # Each PP worker takes in the output of the previous PP worker,
                # and the TP group executes in SPMD fashion.
                outputs = [
                    worker.execute_model_ray.bind(outputs[i])  # type: ignore[attr-defined]
                    for i, worker in enumerate(tp_group)
                ]

                last_pp_rank = len(self.pp_tp_workers) - 1
                if (
                    pp_rank < last_pp_rank
                    and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE != "shm"
                ):
                    # Specify how intermediate tensors should be passed
                    # between pp stages, no need to specify for the last
                    # pp stage or when using shared memory (the default).
                    transport = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
                    outputs = [
                        output.with_tensor_transport(transport=transport)
                        for output in outputs
                    ]

            forward_dag = MultiOutputNode(outputs)

        if envs.VLLM_USE_RAY_WRAPPED_PP_COMM:
            from ray.experimental.channel.accelerator_context import (
                register_accelerator_context,
            )

            from vllm.distributed.device_communicators.ray_communicator import (
                RayPPCommunicator,
            )

            register_accelerator_context(
                torch_module_name="cuda", communicator_cls=RayPPCommunicator
            )
            logger.info(
                "Using RayPPCommunicator "
                "(which wraps vLLM _PP GroupCoordinator) "
                "for Ray Compiled Graph communication."
            )
        else:
            logger.info(
                "Using Ray's NCCL communicator for Ray Compiled Graph communication."
            )

        return forward_dag.experimental_compile(
            enable_asyncio=enable_asyncio,
            _overlap_gpu_communication=envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
        )

    def __del__(self):
        self.shutdown()

    def check_health(self) -> None:
        # Assume that the Ray workers are healthy.
        # TODO: check the health of the Ray workers
        return

ADDITIONAL_ENV_VARS class-attribute instance-attribute

ADDITIONAL_ENV_VARS = {"HF_TOKEN", "HUGGING_FACE_HUB_TOKEN"}

WORKER_SPECIFIC_ENV_VARS class-attribute instance-attribute

WORKER_SPECIFIC_ENV_VARS = {
    "VLLM_HOST_IP",
    "VLLM_HOST_PORT",
    "LOCAL_RANK",
    "CUDA_VISIBLE_DEVICES",
}
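
Together, these two sets control environment propagation: variables in ADDITIONAL_ENV_VARS (together with platform-specific variables and whatever get_env_vars_to_copy selects by default) are copied from the driver to every Ray worker, while variables in WORKER_SPECIFIC_ENV_VARS are excluded from that copy because each worker must set them itself. A minimal sketch, with a placeholder token value:

import os

# Set on the driver before the engine starts; copied to all Ray workers.
os.environ["HF_TOKEN"] = "<your-hf-token>"

# Worker-specific: assigned per worker/node by the executor and never copied
# from the driver (e.g. CUDA_VISIBLE_DEVICES, VLLM_HOST_IP, LOCAL_RANK).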

max_concurrent_batches property

max_concurrent_batches: int

Ray distributed executor supports pipeline parallelism, meaning that it allows PP size batches to be executed concurrently.

supports_pp class-attribute instance-attribute

supports_pp: bool = True

uses_ray class-attribute instance-attribute

uses_ray: bool = True

__del__

__del__()
Source code in vllm/v1/executor/ray_executor.py
def __del__(self):
    self.shutdown()

_check_ray_cgraph_installation

_check_ray_cgraph_installation()
Source code in vllm/v1/executor/ray_executor.py
def _check_ray_cgraph_installation(self):
    import importlib.metadata

    from packaging import version

    required_version = version.parse("2.43.0")
    current_version = version.parse(importlib.metadata.version("ray"))
    if current_version < required_version:
        raise ValueError(
            f"Ray version {required_version} is "
            f"required, but found {current_version}"
        )

    import importlib.util

    cgraph_spec = importlib.util.find_spec("ray.experimental.compiled_dag_ref")
    if cgraph_spec is None:
        raise ValueError(
            "Ray Compiled Graph is not installed. "
            "Run `pip install ray[cgraph]` to install it."
        )

    cupy_spec = importlib.util.find_spec("cupy")
    if cupy_spec is None and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE == "nccl":
        raise ValueError(
            "cupy is not installed but required since "
            "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE is set to 'nccl'. "
            "Run `pip install ray[cgraph]` and check cupy installation."
        )

_compiled_ray_dag

_compiled_ray_dag(enable_asyncio: bool)
Source code in vllm/v1/executor/ray_executor.py
def _compiled_ray_dag(self, enable_asyncio: bool):
    assert self.parallel_config.use_ray
    self._check_ray_cgraph_installation()
    # Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300 seconds
    # (it is 10 seconds by default). This is a Ray environment variable to
    # control the timeout of getting result from a compiled graph execution,
    # i.e., the distributed execution that includes model forward runs and
    # intermediate tensor communications, in the case of vllm.
    # Note: we should set this env var before importing
    # ray.dag, otherwise it will not take effect.
    os.environ.setdefault("RAY_CGRAPH_get_timeout", "300")  # noqa: SIM112
    from ray.dag import InputNode, MultiOutputNode

    logger.info(
        "RAY_CGRAPH_get_timeout is set to %s",
        os.environ["RAY_CGRAPH_get_timeout"],  # noqa: SIM112
    )
    logger.info(
        "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE = %s",
        envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE,
    )
    logger.info(
        "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s",
        envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
    )

    channel_type = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
    if channel_type not in ("auto", "nccl", "shm"):
        raise ValueError(
            "Invalid value for VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: "
            f"{channel_type}. Valid values are: 'auto', 'nccl', or 'shm'."
        )

    with InputNode() as input_data:
        # Example DAG: PP=2, TP=4
        #
        # SchedulerOutput -> 0 -> (SchedulerOutput, IntermediateTensors) -> 4 -> ModelRunnerOutput   # noqa: E501
        # SchedulerOutput -> 1 -> (SchedulerOutput, IntermediateTensors) -> 5 -> ModelRunnerOutput   # noqa: E501
        # SchedulerOutput -> 2 -> (SchedulerOutput, IntermediateTensors) -> 6 -> ModelRunnerOutput   # noqa: E501
        # SchedulerOutput -> 3 -> (SchedulerOutput, IntermediateTensors) -> 7 -> ModelRunnerOutput   # noqa: E501

        # All workers in the first TP group will take in the
        # ExecuteModelRequest as input.
        outputs = [input_data for _ in self.pp_tp_workers[0]]
        for pp_rank, tp_group in enumerate(self.pp_tp_workers):
            # Each PP worker takes in the output of the previous PP worker,
            # and the TP group executes in SPMD fashion.
            outputs = [
                worker.execute_model_ray.bind(outputs[i])  # type: ignore[attr-defined]
                for i, worker in enumerate(tp_group)
            ]

            last_pp_rank = len(self.pp_tp_workers) - 1
            if (
                pp_rank < last_pp_rank
                and envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE != "shm"
            ):
                # Specify how intermediate tensors should be passed
                # between pp stages, no need to specify for the last
                # pp stage or when using shared memory (the default).
                transport = envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE
                outputs = [
                    output.with_tensor_transport(transport=transport)
                    for output in outputs
                ]

        forward_dag = MultiOutputNode(outputs)

    if envs.VLLM_USE_RAY_WRAPPED_PP_COMM:
        from ray.experimental.channel.accelerator_context import (
            register_accelerator_context,
        )

        from vllm.distributed.device_communicators.ray_communicator import (
            RayPPCommunicator,
        )

        register_accelerator_context(
            torch_module_name="cuda", communicator_cls=RayPPCommunicator
        )
        logger.info(
            "Using RayPPCommunicator "
            "(which wraps vLLM _PP GroupCoordinator) "
            "for Ray Compiled Graph communication."
        )
    else:
        logger.info(
            "Using Ray's NCCL communicator for Ray Compiled Graph communication."
        )

    return forward_dag.experimental_compile(
        enable_asyncio=enable_asyncio,
        _overlap_gpu_communication=envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM,
    )
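
The compiled graph is tuned through the environment variables read in the source above. A minimal sketch, assuming they are set before the engine (and therefore ray.dag) is imported:

import os

# Channel for intermediate tensors between PP stages: "auto", "nccl", or "shm".
os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = "shm"
# Overlap GPU communication with computation in the compiled graph.
os.environ["VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM"] = "1"
# Timeout (in seconds) for fetching the result of a compiled graph execution.
os.environ["RAY_CGRAPH_get_timeout"] = "300"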

_configure_ray_workers_use_nsight

_configure_ray_workers_use_nsight(
    ray_remote_kwargs,
) -> dict[str, Any]
Source code in vllm/v1/executor/ray_executor.py
def _configure_ray_workers_use_nsight(self, ray_remote_kwargs) -> dict[str, Any]:
    # If nsight profiling is enabled, we need to set the profiling
    # configuration for the ray workers as runtime env.
    runtime_env = ray_remote_kwargs.setdefault("runtime_env", {})
    runtime_env.update(
        {
            "nsight": {
                "t": "cuda,cudnn,cublas",
                "o": "'worker_process_%p'",
                "cuda-graph-trace": "node",
            }
        }
    )

    return ray_remote_kwargs
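
This hook is only invoked when parallel_config.ray_workers_use_nsight is set. A hedged sketch of turning it on, assuming the ray_workers_use_nsight engine argument exposed by vllm.LLM / EngineArgs (not defined in this module):

from vllm import LLM

# Illustrative configuration: profile each Ray worker with Nsight Systems.
llm = LLM(
    model="facebook/opt-125m",
    tensor_parallel_size=2,
    distributed_executor_backend="ray",
    ray_workers_use_nsight=True,
)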

_get_env_vars_to_be_updated

_get_env_vars_to_be_updated()
Source code in vllm/v1/executor/ray_executor.py
def _get_env_vars_to_be_updated(self):
    return self._env_vars_for_all_workers

_init_executor

_init_executor() -> None
Source code in vllm/v1/executor/ray_executor.py
def _init_executor(self) -> None:
    self.forward_dag: ray.dag.CompiledDAG | None = None

    # For TPU or XPU, avoid compiling NVIDIA's NCCL
    if current_platform.is_tpu() or current_platform.is_xpu():
        os.environ["VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE"] = "shm"

    assert self.uses_ray
    initialize_ray_cluster(self.parallel_config)
    placement_group = self.parallel_config.placement_group

    # Disable Ray usage stats collection.
    ray_usage = os.environ.get("RAY_USAGE_STATS_ENABLED", "0")
    if ray_usage != "1":
        os.environ["RAY_USAGE_STATS_ENABLED"] = "0"

    # Create the parallel GPU workers.
    self._init_workers_ray(placement_group)

    # KV connector setup
    self.has_connector = self.vllm_config.kv_transfer_config is not None

_init_workers_ray

_init_workers_ray(
    placement_group: PlacementGroup, **ray_remote_kwargs
)
Source code in vllm/v1/executor/ray_executor.py
def _init_workers_ray(self, placement_group: "PlacementGroup", **ray_remote_kwargs):
    num_gpus = envs.VLLM_RAY_PER_WORKER_GPUS

    # The driver dummy worker does not actually use any resources.
    # It holds the resource for the driver worker.
    self.driver_dummy_worker: RayWorkerWrapper | None = None
    # The remaining workers are the actual ray actors.
    self.workers: list[RayWorkerWrapper] = []

    # Used in ray compiled DAG: indexed first by PP rank,
    # and then TP rank. In other words, the inner list is
    # the TP group of workers for a PP rank.
    self.pp_tp_workers: list[list[RayWorkerWrapper]] = []

    if self.parallel_config.ray_workers_use_nsight:
        ray_remote_kwargs = self._configure_ray_workers_use_nsight(
            ray_remote_kwargs
        )

    # Create the workers.
    bundle_indices: list[int]
    if envs.VLLM_RAY_BUNDLE_INDICES:
        # Use the bundle indices specified by the user.
        bundle_indices = list(map(int, envs.VLLM_RAY_BUNDLE_INDICES.split(",")))
        assert len(bundle_indices) == self.parallel_config.world_size, (
            "VLLM_RAY_BUNDLE_INDICES must have the same size"
            f" as the world size, but got {bundle_indices=} "
            f"and {self.parallel_config.world_size=}"
        )
        assert len(set(bundle_indices)) == len(bundle_indices), (
            "VLLM_RAY_BUNDLE_INDICES cannot have duplicate values,"
            f" but got {bundle_indices=}"
        )
    else:
        # use the first N bundles that have GPU resources.
        bundle_indices = []
        for bundle_id, bundle in enumerate(placement_group.bundle_specs):
            if bundle.get(current_platform.ray_device_key, 0):
                bundle_indices.append(bundle_id)
        bundle_indices = bundle_indices[: self.parallel_config.world_size]

    worker_metadata: list[RayWorkerMetaData] = []
    driver_ip = get_ip()
    for rank, bundle_id in enumerate(bundle_indices):
        scheduling_strategy = PlacementGroupSchedulingStrategy(
            placement_group=placement_group,
            placement_group_capture_child_tasks=True,
            placement_group_bundle_index=bundle_id,
        )

        if current_platform.ray_device_key == "GPU":
            # NV+AMD GPUs, and Intel XPUs
            worker = ray.remote(
                num_cpus=0,
                num_gpus=num_gpus,
                scheduling_strategy=scheduling_strategy,
                **ray_remote_kwargs,
            )(RayWorkerWrapper).remote(  # type: ignore[attr-defined]
                vllm_config=self.vllm_config, rpc_rank=rank
            )
        else:
            worker = ray.remote(
                num_cpus=0,
                num_gpus=0,
                resources={current_platform.ray_device_key: num_gpus},
                scheduling_strategy=scheduling_strategy,
                **ray_remote_kwargs,
            )(RayWorkerWrapper).remote(  # type: ignore[attr-defined]
                vllm_config=self.vllm_config, rpc_rank=rank
            )
        worker_metadata.append(RayWorkerMetaData(worker=worker, created_rank=rank))

    worker_ips = ray.get(
        [
            each.worker.get_node_ip.remote()  # type: ignore[attr-defined]
            for each in worker_metadata
        ]
    )

    for each, ip in zip(worker_metadata, worker_ips):
        each.ip = ip

    logger.debug("workers: %s", worker_metadata)
    logger.debug("driver_dummy_worker: %s", self.driver_dummy_worker)

    ip_counts: dict[str, int] = {}
    for ip in worker_ips:
        ip_counts[ip] = ip_counts.get(ip, 0) + 1

    def sort_by_driver_then_worker_ip(item: RayWorkerMetaData):
        """
        Sort the workers based on 3 properties:
        1. If the worker is on the same node as the driver (vllm engine),
            it should be placed first.
        2. Then, if the worker is on a node with fewer workers, it should
            be placed first.
        3. Finally, if the worker is on a node with a smaller IP address,
            it should be placed first.
        """
        ip = item.ip
        return 0 if ip == driver_ip else 1, ip_counts[ip], ip

    # After sorting, the workers on the same node will be
    # close to each other, and the workers on the driver
    # node will be placed first.
    sorted_worker_metadata = sorted(
        worker_metadata, key=sort_by_driver_then_worker_ip
    )
    for i, item in enumerate(sorted_worker_metadata):
        item.adjusted_rank = i
    self.workers = [item.worker for item in sorted_worker_metadata]
    rerank_mapping = {
        item.created_rank: item.adjusted_rank for item in sorted_worker_metadata
    }
    self.collective_rpc("adjust_rank", args=(rerank_mapping,))

    # Get the set of GPU IDs used on each node.
    worker_node_and_gpu_ids = []
    for worker in [self.driver_dummy_worker] + self.workers:
        if worker is None:
            # driver_dummy_worker can be None when using ray spmd worker.
            continue
        worker_node_and_gpu_ids.append(
            ray.get(worker.get_node_and_gpu_ids.remote())
        )  # type: ignore[attr-defined]

    node_workers = defaultdict(list)  # node id -> list of worker ranks
    node_gpus = defaultdict(list)  # node id -> list of gpu ids

    for i, (node_id, gpu_ids) in enumerate(worker_node_and_gpu_ids):
        node_workers[node_id].append(i)
        # `gpu_ids` can be a list of strings or integers.
        # convert them to integers for consistency.
        # NOTE: gpu_ids can be larger than 9 (e.g. 16 GPUs),
        # string sorting is not sufficient.
        # see https://github.com/vllm-project/vllm/issues/5590
        gpu_ids = [int(x) for x in gpu_ids]
        node_gpus[node_id].extend(gpu_ids)
    for node_id, gpu_ids in node_gpus.items():
        node_gpus[node_id] = sorted(gpu_ids)

    all_ips = set(worker_ips + [driver_ip])
    n_ips = len(all_ips)
    n_nodes = len(node_workers)

    if n_nodes != n_ips:
        raise RuntimeError(
            f"Every node should have a unique IP address. Got {n_nodes}"
            f" nodes with node ids {list(node_workers.keys())} and "
            f"{n_ips} unique IP addresses {all_ips}. Please check your"
            " network configuration. If you set `VLLM_HOST_IP`"
            " environment variable, make sure it is unique for"
            " each node."
        )

    # Set environment variables for the driver and workers.
    all_args_to_update_environment_variables = [
        {
            current_platform.device_control_env_var: ",".join(
                map(str, node_gpus[node_id])
            ),
        }
        for (node_id, _) in worker_node_and_gpu_ids
    ]

    # Environment variables to copy from driver to workers
    env_vars_to_copy = get_env_vars_to_copy(
        exclude_vars=self.WORKER_SPECIFIC_ENV_VARS,
        additional_vars=set(current_platform.additional_env_vars).union(
            self.ADDITIONAL_ENV_VARS
        ),
        destination="workers",
    )

    # Copy existing env vars to each worker's args
    for args in all_args_to_update_environment_variables:
        # TODO: refactor platform-specific env vars
        for name in env_vars_to_copy:
            if name in os.environ:
                args[name] = os.environ[name]

    self._env_vars_for_all_workers = all_args_to_update_environment_variables

    self.collective_rpc(
        "update_environment_variables", args=(self._get_env_vars_to_be_updated(),)
    )

    if len(node_gpus) == 1:
        # in single node case, we don't need to get the IP address.
        # the loopback address is sufficient
        # NOTE: a node may have several IP addresses, one for each
        # network interface. `get_ip()` might return any of them,
        # while they might not work for communication inside the node
        # if the network setup is complicated. Using the loopback address
        # solves this issue, as it always works for communication inside
        # the node.
        driver_ip = "127.0.0.1"
    distributed_init_method = get_distributed_init_method(
        driver_ip, get_open_port()
    )

    # Initialize the actual workers inside worker wrapper.
    all_kwargs = []
    for rank, (node_id, _) in enumerate(worker_node_and_gpu_ids):
        local_rank = node_workers[node_id].index(rank)
        kwargs = dict(
            vllm_config=self.vllm_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            is_driver_worker=(not self.parallel_config)
            or (rank % self.parallel_config.tensor_parallel_size == 0),
        )
        all_kwargs.append(kwargs)
    self.collective_rpc("init_worker", args=(all_kwargs,))

    self.collective_rpc("init_device")
    self.collective_rpc("load_model")

    for pp_rank in range(self.parallel_config.pipeline_parallel_size):
        self.pp_tp_workers.append([])
        for tp_rank in range(self.parallel_config.tensor_parallel_size):
            # PP=2, TP=4
            # pp_tp_workers = [[0, 1, 2, 3], [4, 5, 6, 7]]
            rank = (pp_rank * self.parallel_config.tensor_parallel_size) + tp_rank
            assert len(self.pp_tp_workers[pp_rank]) == tp_rank
            assert pp_rank < len(self.pp_tp_workers)
            self.pp_tp_workers[pp_rank].append(self.workers[rank])
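
Worker placement can be influenced through the environment variables read at the top of this method. A minimal sketch, assuming a world size of 4 and that the variables are set before the executor is created:

import os

# Pin workers to specific placement-group bundles; must contain exactly
# world_size distinct indices, otherwise the assertions above fail.
os.environ["VLLM_RAY_BUNDLE_INDICES"] = "0,1,2,3"

# GPUs requested per worker actor (may be fractional).
os.environ["VLLM_RAY_PER_WORKER_GPUS"] = "1"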

check_health

check_health() -> None
Source code in vllm/v1/executor/ray_executor.py
def check_health(self) -> None:
    # Assume that the Ray workers are healthy.
    # TODO: check the health of the Ray workers
    return

collective_rpc

collective_rpc(
    method: str | Callable,
    timeout: float | None = None,
    args: tuple = (),
    kwargs: dict[str, Any] | None = None,
    non_block: bool = False,
) -> list[Any]

Runs the given method on all workers.

Source code in vllm/v1/executor/ray_executor.py
def collective_rpc(
    self,
    method: str | Callable,
    timeout: float | None = None,
    args: tuple = (),
    kwargs: dict[str, Any] | None = None,
    non_block: bool = False,
) -> list[Any]:
    """Runs the given method on all workers."""
    sent_method = method if isinstance(method, str) else cloudpickle.dumps(method)
    del method

    if kwargs is None:
        kwargs = {}
    ray_worker_outputs = [
        worker.execute_method.remote(  # type: ignore[attr-defined]
            sent_method, *args, **kwargs
        )
        for worker in self.workers
    ]

    # Get the results of the ray workers.
    if non_block:
        return [FutureWrapper((output,)) for output in ray_worker_outputs]

    return ray.get(ray_worker_outputs, timeout=timeout)
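
A hedged usage sketch, mirroring how the executor itself calls this method during initialization (see _init_workers_ray); executor stands for an already-constructed RayDistributedExecutor, env_var_dicts is a hypothetical placeholder, and the returned FutureWrapper is assumed to behave like a standard concurrent.futures.Future:

# Run a worker method by name on every Ray worker and block for the results.
executor.collective_rpc("init_device")

# Positional arguments are forwarded to the worker method.
executor.collective_rpc("update_environment_variables", args=(env_var_dicts,))

# non_block=True returns one future per worker instead of blocking.
futures = executor.collective_rpc("load_model", non_block=True)
results = [f.result() for f in futures]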

execute_model

execute_model(
    scheduler_output: SchedulerOutput,
    non_block: bool = False,
) -> ModelRunnerOutput | Future[ModelRunnerOutput]

Execute the model on the Ray workers.

Parameters:

Name Type Description Default
scheduler_output SchedulerOutput

The scheduler output to execute.

required
non_block bool

If True, the method will return a Future.

False

Returns:

Type Description
ModelRunnerOutput | Future[ModelRunnerOutput]

The model runner output.

Source code in vllm/v1/executor/ray_executor.py
def execute_model(  # type: ignore[override]
    self, scheduler_output: SchedulerOutput, non_block: bool = False
) -> ModelRunnerOutput | Future[ModelRunnerOutput]:
    """Execute the model on the Ray workers.

    Args:
        scheduler_output: The scheduler output to execute.
        non_block: If True, the method will return a Future.

    Returns:
        The model runner output.
    """
    # Build the compiled DAG for the first time.
    if self.forward_dag is None:  # type: ignore
        self.forward_dag = self._compiled_ray_dag(enable_asyncio=False)

    refs = self.forward_dag.execute(scheduler_output)  # type: ignore

    if not self.has_connector:
        # Get output only from a single worker (output_rank)
        # When PP is not used, we block here until the result is available.
        if not non_block:
            return refs[0].get()

        # When PP is used, we return a FutureWrapper immediately so that
        # the scheduler can yield to the next batch.
        return FutureWrapper(refs)

    # Get output from all workers when connector is present
    assert self.kv_output_aggregator is not None
    if not non_block:
        # Block and get results from all workers
        outputs = [ref.get() for ref in refs]
        return self.kv_output_aggregator.aggregate(outputs)

    # Return a future that will aggregate outputs from all workers
    return FutureWrapper(refs, self.kv_output_aggregator)
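
A hedged usage sketch; executor and scheduler_output stand for an already-constructed RayDistributedExecutor and a SchedulerOutput produced by the scheduler, and the returned future is assumed to behave like a standard concurrent.futures.Future:

# Blocking call: returns a ModelRunnerOutput directly.
output = executor.execute_model(scheduler_output)

# Non-blocking call: returns a future immediately, which is how the scheduler
# keeps up to max_concurrent_batches batches in flight with pipeline parallelism.
future = executor.execute_model(scheduler_output, non_block=True)
output = future.result()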

reinitialize_distributed

reinitialize_distributed(
    reconfig_request: ReconfigureDistributedRequest,
) -> None
Source code in vllm/v1/executor/ray_executor.py
def reinitialize_distributed(
    self, reconfig_request: ReconfigureDistributedRequest
) -> None:
    self.collective_rpc("reinitialize_distributed", args=(reconfig_request,))
    if (
        reconfig_request.new_data_parallel_rank
        == ReconfigureRankType.SHUTDOWN_CURRENT_RANK
    ):
        self.shutdown()

shutdown

shutdown() -> None
Source code in vllm/v1/executor/ray_executor.py
def shutdown(self) -> None:
    if logger:
        # Somehow logger can be None here.
        logger.info(
            "Shutting down Ray distributed executor. If you see error log "
            "from logging.cc regarding SIGTERM received, please ignore "
            "because this is the expected termination process in Ray."
        )
    if hasattr(self, "forward_dag") and self.forward_dag is not None:
        self.forward_dag.teardown()
        import ray

        for worker in self.workers:
            ray.kill(worker)
        self.forward_dag = None

RayWorkerMetaData dataclass

Metadata for a Ray worker. The order of ray worker creation can be random, and we need to reset the rank after creating all workers.

Source code in vllm/v1/executor/ray_executor.py
@dataclass
class RayWorkerMetaData:
    """
    Metadata for a Ray worker.
    The order of ray worker creation can be random,
    and we need to reset the rank after creating all workers.
    """

    worker: ActorHandle
    created_rank: int
    adjusted_rank: int = -1
    ip: str = ""

adjusted_rank class-attribute instance-attribute

adjusted_rank: int = -1

created_rank instance-attribute

created_rank: int

ip class-attribute instance-attribute

ip: str = ''

worker instance-attribute

worker: ActorHandle

__init__

__init__(
    worker: ActorHandle,
    created_rank: int,
    adjusted_rank: int = -1,
    ip: str = "",
) -> None
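
The adjusted rank comes from sorting workers with the key described in _init_workers_ray (driver-node workers first, then nodes hosting fewer workers, then by IP). A self-contained sketch of that re-ranking, using a simplified stand-in for the Ray actor metadata (hypothetical IPs, not vLLM API):

from dataclasses import dataclass

@dataclass
class FakeWorkerMeta:
    created_rank: int
    ip: str
    adjusted_rank: int = -1

driver_ip = "10.0.0.1"
metas = [
    FakeWorkerMeta(0, "10.0.0.2"),
    FakeWorkerMeta(1, "10.0.0.1"),
    FakeWorkerMeta(2, "10.0.0.2"),
    FakeWorkerMeta(3, "10.0.0.1"),
]
ip_counts: dict[str, int] = {}
for m in metas:
    ip_counts[m.ip] = ip_counts.get(m.ip, 0) + 1

def sort_key(m: FakeWorkerMeta):
    # Same ordering as sort_by_driver_then_worker_ip above.
    return (0 if m.ip == driver_ip else 1, ip_counts[m.ip], m.ip)

for i, m in enumerate(sorted(metas, key=sort_key)):
    m.adjusted_rank = i

rerank_mapping = {m.created_rank: m.adjusted_rank for m in metas}
print(rerank_mapping)  # {0: 2, 1: 0, 2: 3, 3: 1}: driver-node workers rank first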