vllm.model_executor.models.transformers.legacy

Transformers backend mixin for legacy models.

LegacyMixin

Source code in vllm/model_executor/models/transformers/legacy.py
class LegacyMixin:
    hf_to_vllm_mapper = WeightsMapper(
        # These are applied in order, so the order matters!
        orig_to_new_prefix={
            # Handle BERT-like models
            "roberta": "model",
            "bert": "model",
            # Add `model.` prefix for base model checkpoints
            "": "model.",
            # Remove `model.` prefix if it was already there
            "model.model.": "model.",
            # Classifier/scoring heads will be adjacent to `model`
            "model.score": "classifier",
            "model.classifier": "classifier",
        },
        orig_to_new_suffix={
            # Replace legacy suffixes used for norms
            ".gamma": ".weight",
            ".beta": ".bias",
        },
    )

    def __init__(self, *, vllm_config: "VllmConfig", prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)

        # Skip unsupported/unwanted output embeddings layers
        self.skip_prefixes.extend(
            [
                "model.lm_head.",
                "model.predictions.",
                "model.qa_outputs.",
                "model.embeddings_project.",
                "model.discriminator_predictions.",
            ]
        )

        # Some encoder models have the position_ids buffer in the checkpoint.
        # vLLM will always pass position_ids as an argument, so we skip loading
        # the buffer if it exists
        self.skip_substrs.append("position_ids")

        # Some encoder models have the bias of the final classifier layer
        # in the checkpoint. vLLM does not use this bias, so we skip loading
        # it if it exists
        self.skip_substrs.append("score.bias")

        # RoBERTa-like models need an extra padding offset in positions.
        # FIXME(Isotr0py): This is quite hacky for the roberta edge case,
        # we should find a better way to handle this.
        self.is_roberta = "roberta" in self.text_config.model_type
        self.padding_idx = self.text_config.pad_token_id

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        if self.is_roberta:
            # RoBERTa-specific positions padding
            positions += self.padding_idx + 1
        return super().forward(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_prefix={
        "roberta": "model",
        "bert": "model",
        "": "model.",
        "model.model.": "model.",
        "model.score": "classifier",
        "model.classifier": "classifier",
    },
    orig_to_new_suffix={
        ".gamma": ".weight",
        ".beta": ".bias",
    },
)
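
The prefix and suffix rules above are applied in order, which is what lets the catch-all "" -> "model." entry coexist with the "model.model." -> "model." cleanup that follows it. Below is a minimal sketch of that ordered rewrite, written as a standalone illustration rather than the actual WeightsMapper implementation (remap_name is a hypothetical helper, not part of vLLM's API):

# Illustrative sketch only; `remap_name` is not part of vLLM's API.
ORIG_TO_NEW_PREFIX = {
    "roberta": "model",
    "bert": "model",
    "": "model.",
    "model.model.": "model.",
    "model.score": "classifier",
    "model.classifier": "classifier",
}
ORIG_TO_NEW_SUFFIX = {".gamma": ".weight", ".beta": ".bias"}

def remap_name(name: str) -> str:
    # Apply every prefix rule in declaration order (order matters).
    for old, new in ORIG_TO_NEW_PREFIX.items():
        if name.startswith(old):
            name = new + name[len(old):]
    # Then translate the legacy norm suffixes.
    for old, new in ORIG_TO_NEW_SUFFIX.items():
        if name.endswith(old):
            name = name[: -len(old)] + new
    return name

# BERT checkpoint key with a legacy LayerNorm suffix:
assert remap_name("bert.encoder.layer.0.output.LayerNorm.gamma") == (
    "model.encoder.layer.0.output.LayerNorm.weight"
)
# Scoring head of a sequence-classification checkpoint:
assert remap_name("score.weight") == "classifier.weight"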

is_roberta instance-attribute

is_roberta = 'roberta' in model_type

padding_idx instance-attribute

padding_idx = pad_token_id

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/transformers/legacy.py
def __init__(self, *, vllm_config: "VllmConfig", prefix: str = ""):
    super().__init__(vllm_config=vllm_config, prefix=prefix)

    # Skip unsupported/unwanted output embeddings layers
    self.skip_prefixes.extend(
        [
            "model.lm_head.",
            "model.predictions.",
            "model.qa_outputs.",
            "model.embeddings_project.",
            "model.discriminator_predictions.",
        ]
    )

    # Some encoder models have the position_ids buffer in the checkpoint.
    # vLLM will always pass position_ids as an argument, so we skip loading
    # the buffer if it exists
    self.skip_substrs.append("position_ids")

    # Some encoder models have the bias of the final classifier layer
    # in the checkpoint. vLLM does not use this bias, so we skip loading
    # it if it exists
    self.skip_substrs.append("score.bias")

    # RoBERTa-like models need an extra padding offset in positions.
    # FIXME(Isotr0py): This is quite hacky for the roberta edge case,
    # we should find a better way to handle this.
    self.is_roberta = "roberta" in self.text_config.model_type
    self.padding_idx = self.text_config.pad_token_id
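
The skip_prefixes and skip_substrs lists populated here are consumed by the shared Transformers-backend weight loader. Conceptually they act as a filter over the checkpoint's (name, tensor) pairs; the sketch below is only an approximation of that behavior (filter_weights is a hypothetical helper, not vLLM's loader):

from collections.abc import Iterable, Iterator

import torch

def filter_weights(
    weights: Iterable[tuple[str, torch.Tensor]],
    skip_prefixes: list[str],
    skip_substrs: list[str],
) -> Iterator[tuple[str, torch.Tensor]]:
    # Hypothetical sketch: drop checkpoint entries vLLM never loads.
    for name, tensor in weights:
        if any(name.startswith(p) for p in skip_prefixes):
            continue  # e.g. "model.lm_head.decoder.weight"
        if any(s in name for s in skip_substrs):
            continue  # e.g. "model.embeddings.position_ids"
        yield name, tensor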

forward

forward(
    input_ids: Tensor | None,
    positions: Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: Tensor | None = None,
) -> Tensor | IntermediateTensors
Source code in vllm/model_executor/models/transformers/legacy.py
def forward(
    self,
    input_ids: torch.Tensor | None,
    positions: torch.Tensor,
    intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: torch.Tensor | None = None,
) -> torch.Tensor | IntermediateTensors:
    if self.is_roberta:
        # RoBERTa-specific positions padding
        positions += self.padding_idx + 1
    return super().forward(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
    )
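
For context: Hugging Face RoBERTa starts its learned position embeddings at padding_idx + 1 (indices up to padding_idx are reserved for padding), whereas vLLM always supplies 0-based positions, so forward shifts them before the embedding lookup. A small worked example, assuming the usual pad_token_id of 1:

import torch

pad_token_id = 1             # RoBERTa's usual padding_idx
positions = torch.arange(4)  # positions as passed by vLLM: [0, 1, 2, 3]

# Shift into RoBERTa's position-embedding index space.
roberta_positions = positions + pad_token_id + 1
assert roberta_positions.tolist() == [2, 3, 4, 5]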