vllm.model_executor.models.transformers.pooling

Transformers backend mixins for pooling models.

EmbeddingMixin

Bases: VllmModelForPooling

Source code in vllm/model_executor/models/transformers/pooling.py
class EmbeddingMixin(VllmModelForPooling):
    default_pooling_type = "CLS"

    def __init__(self, *, vllm_config: "VllmConfig", prefix: str = ""):
        # Skip VllmModelForPooling.__init__ and call the next class in MRO
        super(VllmModelForPooling, self).__init__(
            vllm_config=vllm_config, prefix=prefix
        )

        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None

        self.pooler = DispatchPooler(
            {
                "token_embed": Pooler.for_token_embed(pooler_config),
                "embed": Pooler.for_embed(pooler_config),
            }
        )
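
A note on the two-argument `super()` call above: `super(VllmModelForPooling, self)` starts the method lookup *after* `VllmModelForPooling` in the instance's MRO, so the interface's `__init__` is skipped and the next base class (the actual Transformers-backend model class) is initialized directly. A minimal standalone sketch of the pattern (the class names here are illustrative, not vLLM's):

class Base:
    def __init__(self):
        print("Base.__init__")

class Interface(Base):
    def __init__(self):
        print("Interface.__init__")  # the step we want to skip
        super().__init__()

class Model(Interface):
    def __init__(self):
        # Start the MRO lookup *after* Interface, so Base.__init__
        # runs and Interface.__init__ never does.
        super(Interface, self).__init__()

Model()  # prints only "Base.__init__"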

default_pooling_type (class attribute, instance attribute)

default_pooling_type = 'CLS'

pooler (instance attribute)

pooler = DispatchPooler(
    {
        "token_embed": Pooler.for_token_embed(pooler_config),
        "embed": Pooler.for_embed(pooler_config),
    }
)
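
The `DispatchPooler` above maps a pooling-task name to a concrete pooler and routes each request to the matching one. A minimal standalone sketch of the dispatch idea (an illustration only, not vLLM's actual `DispatchPooler` implementation):

from typing import Callable

class DispatchByTask:
    """Picks a handler by task name, as DispatchPooler does for poolers."""

    def __init__(self, handlers: dict[str, Callable]):
        self.handlers = handlers

    def __call__(self, task: str, *args, **kwargs):
        if task not in self.handlers:
            raise ValueError(f"Unsupported pooling task: {task!r}")
        return self.handlers[task](*args, **kwargs)

pooler = DispatchByTask({
    "embed": lambda hidden: hidden[0],     # e.g. a CLS-style reduction
    "token_embed": lambda hidden: hidden,  # keep all token embeddings
})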

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/transformers/pooling.py
def __init__(self, *, vllm_config: "VllmConfig", prefix: str = ""):
    # Skip VllmModelForPooling.__init__ and call the next class in MRO
    super(VllmModelForPooling, self).__init__(
        vllm_config=vllm_config, prefix=prefix
    )

    pooler_config = vllm_config.model_config.pooler_config
    assert pooler_config is not None

    self.pooler = DispatchPooler(
        {
            "token_embed": Pooler.for_token_embed(pooler_config),
            "embed": Pooler.for_embed(pooler_config),
        }
    )
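
In practice this mixin backs embedding models served through the Transformers backend. A hedged usage sketch (the checkpoint is a placeholder, and the `model_impl="transformers"` and `runner="pooling"` arguments assume a recent vLLM; check the flags of your version):

from vllm import LLM

# Force the Transformers backend for an embedding model.
llm = LLM(
    model="BAAI/bge-base-en-v1.5",  # example checkpoint
    model_impl="transformers",
    runner="pooling",
)

outputs = llm.embed(["vLLM is a fast inference engine."])
print(len(outputs[0].outputs.embedding))  # embedding dimensionality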

SequenceClassificationMixin

Bases: SupportsCrossEncoding, VllmModelForPooling

Source code in vllm/model_executor/models/transformers/pooling.py
class SequenceClassificationMixin(SupportsCrossEncoding, VllmModelForPooling):
    default_pooling_type = "CLS"

    def __init__(self, *, vllm_config: "VllmConfig", prefix: str = ""):
        # Skip VllmModelForPooling.__init__ and call the next class in MRO
        super(VllmModelForPooling, self).__init__(
            vllm_config=vllm_config, prefix=prefix
        )

        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None

        # Certain information about the model and classifier can only be
        # inferred from the `ForSequenceClassification` class. Therefore, we
        # instantiate it on the "meta" device to avoid allocating GPU memory.
        with torch.device("meta"):
            seq_cls_model = AutoModelForSequenceClassification.from_config(
                self.config,
                dtype=self.model_config.dtype,
                trust_remote_code=self.model_config.trust_remote_code,
            )

        # When used for sequence classification, some models have their
        # pooling layers removed. Make sure this is reflected in vLLM.
        for module in seq_cls_model.modules():
            if hasattr(module, "pooler") and module.pooler is None:
                self.model.pooler = None
                break
        if self.model.pooler is not None:
            raise ValueError(
                "Sequence classification models with pooling layers are not "
                "supported yet in the Transformers backend."
            )

        # Unlike `lm_head`, `classifier` is not always `nn.Linear`.
        self.classifier = seq_cls_model.classifier
        self.init_parameters(self.classifier, dtype=self.model_config.head_dtype)

        class ClassifierWithReshape(self.classifier.__class__):
            """CLSPool has already been applied in `pooling`.
            Add dim to match expected input shape of `classifier.forward`."""

            def forward(self, *args, **kwargs):
                if len(args) > 0:
                    args = (args[0].unsqueeze(1), *args[1:])
                return super().forward(*args, **kwargs)

        self.classifier.__class__ = ClassifierWithReshape

        self.pooler = DispatchPooler(
            {
                "token_classify": Pooler.for_token_classify(
                    pooler_config, classifier=self.classifier
                ),
                "classify": ClassifierPooler(
                    pooling=CLSPool(), classifier=self.classifier, act_fn="classify"
                ),
                "score": ClassifierPooler(
                    pooling=CLSPool(), classifier=self.classifier, act_fn="score"
                ),
            }
        )
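
The `torch.device("meta")` context used above is a standard PyTorch technique: tensors created inside it record shapes and dtypes but allocate no storage, so the `ForSequenceClassification` model can be built and inspected for free. A minimal sketch:

import torch
import torch.nn as nn

with torch.device("meta"):
    layer = nn.Linear(4096, 2)  # no memory is allocated

print(layer.weight.device)  # meta
print(layer.weight.shape)   # torch.Size([2, 4096])
# The parameters have no backing storage; they exist only as metadata,
# which is all that is needed to inspect the classifier's structure.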

classifier (instance attribute)

classifier = seq_cls_model.classifier

default_pooling_type (class attribute, instance attribute)

default_pooling_type = 'CLS'

pooler (instance attribute)

pooler = DispatchPooler(
    {
        "token_classify": Pooler.for_token_classify(
            pooler_config, classifier=self.classifier
        ),
        "classify": ClassifierPooler(
            pooling=CLSPool(),
            classifier=self.classifier,
            act_fn="classify",
        ),
        "score": ClassifierPooler(
            pooling=CLSPool(),
            classifier=self.classifier,
            act_fn="score",
        ),
    }
)
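
The `ClassifierWithReshape` trick relies on instance-level class reassignment: subclassing the classifier's *runtime* class and swapping `__class__` wraps `forward` for that one object without rebuilding the module. A minimal standalone sketch of the pattern:

class Classifier:
    def forward(self, x):
        return f"scored: {x}"

obj = Classifier()

# Subclass the runtime class of the instance, as the mixin does with
# `self.classifier.__class__`, then swap the instance's class in place.
class WithPreprocess(obj.__class__):
    def forward(self, x):
        return super().forward(x.strip())

obj.__class__ = WithPreprocess  # affects only this instance
print(obj.forward("  hi  "))    # scored: hi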

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/transformers/pooling.py
def __init__(self, *, vllm_config: "VllmConfig", prefix: str = ""):
    # Skip VllmModelForPooling.__init__ and call the next class in MRO
    super(VllmModelForPooling, self).__init__(
        vllm_config=vllm_config, prefix=prefix
    )

    pooler_config = vllm_config.model_config.pooler_config
    assert pooler_config is not None

    # Certain information about the model and classifier can only be
    # inferred from the `ForSequenceClassification` class. Therefore, we
    # instantiate it on the "meta" device to avoid allocating GPU memory.
    with torch.device("meta"):
        seq_cls_model = AutoModelForSequenceClassification.from_config(
            self.config,
            dtype=self.model_config.dtype,
            trust_remote_code=self.model_config.trust_remote_code,
        )

    # When used for sequence classification, some models have their
    # pooling layers removed. Make sure this is reflected in vLLM.
    for module in seq_cls_model.modules():
        if hasattr(module, "pooler") and module.pooler is None:
            self.model.pooler = None
            break
    if self.model.pooler is not None:
        raise ValueError(
            "Sequence classification models with pooling layers are not "
            "supported yet in the Transformers backend."
        )

    # Unlike `lm_head`, `classifier` is not always `nn.Linear`.
    self.classifier = seq_cls_model.classifier
    self.init_parameters(self.classifier, dtype=self.model_config.head_dtype)

    class ClassifierWithReshape(self.classifier.__class__):
        """CLSPool has already been applied in `pooling`.
        Add dim to match expected input shape of `classifier.forward`."""

        def forward(self, *args, **kwargs):
            if len(args) > 0:
                args = (args[0].unsqueeze(1), *args[1:])
            return super().forward(*args, **kwargs)

    self.classifier.__class__ = ClassifierWithReshape

    self.pooler = DispatchPooler(
        {
            "token_classify": Pooler.for_token_classify(
                pooler_config, classifier=self.classifier
            ),
            "classify": ClassifierPooler(
                pooling=CLSPool(), classifier=self.classifier, act_fn="classify"
            ),
            "score": ClassifierPooler(
                pooling=CLSPool(), classifier=self.classifier, act_fn="score"
            ),
        }
    )
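
A hedged usage sketch for the resulting cross-encoder (the checkpoint is a placeholder, and the `model_impl="transformers"` and `runner="pooling"` arguments assume a recent vLLM; check the flags of your version):

from vllm import LLM

llm = LLM(
    model="cross-encoder/ms-marco-MiniLM-L-6-v2",  # example checkpoint
    model_impl="transformers",
    runner="pooling",
)

# "score" pairs a query with each candidate document.
outputs = llm.score(
    "What is the capital of France?",
    ["Paris is the capital of France.", "The sky is blue."],
)
for out in outputs:
    print(out.outputs.score)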