vllm.v1.engine.mm_input_cache

MultiModalInputCacheClient

Used by P0 (the frontend process) to check whether multi-modal kwargs are already cached in P1 (the engine-core process), so that cached inputs are not re-sent across the process boundary.

Source code in vllm/v1/engine/mm_input_cache.py
class MultiModalInputCacheClient:
    """Used by P0 to check whether multi-modal kwargs are cached in P1."""

    def __init__(self, model_config: "ModelConfig",
                 mm_registry: MultiModalRegistry) -> None:
        super().__init__()

        self.enabled = mm_registry.enable_mm_input_cache(model_config)
        self.mm_cache = MultiModalCache.get_lru_cache(
            model_config.get_mm_input_cache_gb(),
            MultiModalCacheItemMetadata,
        )

    def get_and_update(
        self,
        mm_inputs: Sequence[MultiModalKwargs],
        mm_hashes: list[str],
    ) -> Sequence[Optional[MultiModalKwargs]]:
        assert len(mm_inputs) == len(mm_hashes)

        if not self.enabled:
            assert is_list_of(mm_inputs, MultiModalKwargs)
            return mm_inputs

        full_mm_inputs = list[Optional[MultiModalKwargs]]()
        for mm_input, mm_hash in zip(mm_inputs, mm_hashes):
            if self.mm_cache.get(mm_hash) is not None:
                mm_input = None
            else:
                self.mm_cache[mm_hash] = \
                    MultiModalCacheItemMetadata.wraps(mm_input)

            full_mm_inputs.append(mm_input)

        return full_mm_inputs

    def reset(self) -> None:
        self.mm_cache.clear()

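On the client (P0) side, get_and_update replaces every input whose hash is already cached with None, so only new multi-modal kwargs are serialized and sent to P1; for new entries it stores lightweight size metadata (MultiModalCacheItemMetadata), not the kwargs themselves. A minimal, self-contained sketch of that substitution step, assuming plain dicts in place of MultiModalKwargs and of the size-bounded LRU cache (none of the names below are vLLM APIs):

from typing import Optional

def client_get_and_update(
    cache: dict[str, int],       # hash -> recorded size; stands in for item metadata
    mm_inputs: list[dict],       # plain dicts stand in for MultiModalKwargs
    mm_hashes: list[str],
) -> list[Optional[dict]]:
    assert len(mm_inputs) == len(mm_hashes)
    out: list[Optional[dict]] = []
    for mm_input, mm_hash in zip(mm_inputs, mm_hashes):
        if mm_hash in cache:
            out.append(None)                # P1 already holds it; skip the payload
        else:
            cache[mm_hash] = len(mm_input)  # record metadata only, not the kwargs
            out.append(mm_input)
    return out

cache: dict[str, int] = {}
print(client_get_and_update(cache, [{"pixel_values": 1}], ["img-0"]))  # [{'pixel_values': 1}]
print(client_get_and_update(cache, [{"pixel_values": 1}], ["img-0"]))  # [None]

Note that the real cache is an LRU bounded by model_config.get_mm_input_cache_gb(); eviction is omitted here for brevity.
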
enabled instance-attribute

enabled = enable_mm_input_cache(model_config)

mm_cache instance-attribute

mm_cache = get_lru_cache(
    get_mm_input_cache_gb(), MultiModalCacheItemMetadata
)

__init__

__init__(
    model_config: ModelConfig,
    mm_registry: MultiModalRegistry,
) -> None
Source code in vllm/v1/engine/mm_input_cache.py
def __init__(self, model_config: "ModelConfig",
             mm_registry: MultiModalRegistry) -> None:
    super().__init__()

    self.enabled = mm_registry.enable_mm_input_cache(model_config)
    self.mm_cache = MultiModalCache.get_lru_cache(
        model_config.get_mm_input_cache_gb(),
        MultiModalCacheItemMetadata,
    )

get_and_update

get_and_update(
    mm_inputs: Sequence[MultiModalKwargs],
    mm_hashes: list[str],
) -> Sequence[Optional[MultiModalKwargs]]
Source code in vllm/v1/engine/mm_input_cache.py
def get_and_update(
    self,
    mm_inputs: Sequence[MultiModalKwargs],
    mm_hashes: list[str],
) -> Sequence[Optional[MultiModalKwargs]]:
    assert len(mm_inputs) == len(mm_hashes)

    if not self.enabled:
        assert is_list_of(mm_inputs, MultiModalKwargs)
        return mm_inputs

    full_mm_inputs = list[Optional[MultiModalKwargs]]()
    for mm_input, mm_hash in zip(mm_inputs, mm_hashes):
        if self.mm_cache.get(mm_hash) is not None:
            mm_input = None
        else:
            self.mm_cache[mm_hash] = \
                MultiModalCacheItemMetadata.wraps(mm_input)

        full_mm_inputs.append(mm_input)

    return full_mm_inputs

reset

reset() -> None
Source code in vllm/v1/engine/mm_input_cache.py
def reset(self) -> None:
    self.mm_cache.clear()

MultiModalInputCacheServer

Used by P1 (the engine-core process) so that P0 (the frontend process) does not have to re-send multi-modal kwargs it has already sent: None placeholders are filled in from this server-side cache.

Source code in vllm/v1/engine/mm_input_cache.py
class MultiModalInputCacheServer:
    """Used by P1 to avoid requiring past multi-modal kwargs from P0."""

    def __init__(self, model_config: "ModelConfig",
                 mm_registry: MultiModalRegistry) -> None:
        super().__init__()

        self.enabled = mm_registry.enable_mm_input_cache(model_config)
        self.mm_cache = MultiModalCache.get_lru_cache(
            model_config.get_mm_input_cache_gb(),
            MultiModalKwargs,
        )

    def get_and_update(
        self,
        mm_inputs: Sequence[Optional[MultiModalKwargs]],
        mm_hashes: list[str],
    ) -> Sequence[MultiModalKwargs]:
        assert len(mm_inputs) == len(mm_hashes)

        if not self.enabled:
            assert is_list_of(mm_inputs, MultiModalKwargs)
            return mm_inputs

        full_mm_inputs = list[MultiModalKwargs]()
        for mm_input, mm_hash in zip(mm_inputs, mm_hashes):
            if mm_input is None:
                mm_input = self.mm_cache[mm_hash]
            else:
                self.mm_cache[mm_hash] = mm_input

            full_mm_inputs.append(mm_input)

        return full_mm_inputs

    def reset(self) -> None:
        self.mm_cache.clear()

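On the server (P1) side the direction is reversed: a None placeholder marks kwargs that P0 knows are already cached, so they are recovered by hash, while concrete values are stored for future reuse. A matching self-contained sketch under the same stand-in assumptions as the client example above:

from typing import Optional

def server_get_and_update(
    cache: dict[str, dict],      # hash -> full kwargs; stands in for the LRU cache
    mm_inputs: list[Optional[dict]],
    mm_hashes: list[str],
) -> list[dict]:
    assert len(mm_inputs) == len(mm_hashes)
    out: list[dict] = []
    for mm_input, mm_hash in zip(mm_inputs, mm_hashes):
        if mm_input is None:
            mm_input = cache[mm_hash]   # placeholder: must hit, since P0 mirrors this cache
        else:
            cache[mm_hash] = mm_input   # first sight: keep the full kwargs for reuse
        out.append(mm_input)
    return out

Unlike the client, the server caches the full MultiModalKwargs (note the MultiModalKwargs item type passed to get_lru_cache above), since it must be able to reconstruct inputs that P0 no longer sends.
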
enabled instance-attribute

enabled = enable_mm_input_cache(model_config)

mm_cache instance-attribute

mm_cache = get_lru_cache(
    get_mm_input_cache_gb(), MultiModalKwargs
)

__init__

__init__(
    model_config: ModelConfig,
    mm_registry: MultiModalRegistry,
) -> None
Source code in vllm/v1/engine/mm_input_cache.py
def __init__(self, model_config: "ModelConfig",
             mm_registry: MultiModalRegistry) -> None:
    super().__init__()

    self.enabled = mm_registry.enable_mm_input_cache(model_config)
    self.mm_cache = MultiModalCache.get_lru_cache(
        model_config.get_mm_input_cache_gb(),
        MultiModalKwargs,
    )

get_and_update

get_and_update(
    mm_inputs: Sequence[Optional[MultiModalKwargs]],
    mm_hashes: list[str],
) -> Sequence[MultiModalKwargs]
Source code in vllm/v1/engine/mm_input_cache.py
def get_and_update(
    self,
    mm_inputs: Sequence[Optional[MultiModalKwargs]],
    mm_hashes: list[str],
) -> Sequence[MultiModalKwargs]:
    assert len(mm_inputs) == len(mm_hashes)

    if not self.enabled:
        assert is_list_of(mm_inputs, MultiModalKwargs)
        return mm_inputs

    full_mm_inputs = list[MultiModalKwargs]()
    for mm_input, mm_hash in zip(mm_inputs, mm_hashes):
        if mm_input is None:
            mm_input = self.mm_cache[mm_hash]
        else:
            self.mm_cache[mm_hash] = mm_input

        full_mm_inputs.append(mm_input)

    return full_mm_inputs

reset

reset() -> None
Source code in vllm/v1/engine/mm_input_cache.py
def reset(self) -> None:
    self.mm_cache.clear()
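
Putting the two halves together: both caches are created with the same capacity (model_config.get_mm_input_cache_gb()) and LRU policy, so they evict in lockstep and a None placeholder from P0 always resolves on P1. A hedged end-to-end sketch with a toy capacity-1 LRU (all names are illustrative; this is not the vLLM API):

from collections import OrderedDict
from typing import Optional

class ToyLRU(OrderedDict):
    """Capacity-bounded LRU; client and server must use identical settings."""

    def __init__(self, capacity: int) -> None:
        super().__init__()
        self.capacity = capacity

    def touch(self, key: str, value) -> None:
        self[key] = value
        self.move_to_end(key)
        if len(self) > self.capacity:
            self.popitem(last=False)  # evict the least recently used entry

client_cache, server_cache = ToyLRU(1), ToyLRU(1)

def send(mm_input: dict, mm_hash: str) -> Optional[dict]:
    """P0 side: return None if P1 is known to hold the entry."""
    hit = mm_hash in client_cache
    client_cache.touch(mm_hash, True)  # metadata stand-in, not the kwargs
    return None if hit else mm_input

def receive(payload: Optional[dict], mm_hash: str) -> dict:
    """P1 side: fill None placeholders from the mirrored cache."""
    value = server_cache[mm_hash] if payload is None else payload
    server_cache.touch(mm_hash, value)
    return value

assert receive(send({"img": 0}, "h0"), "h0") == {"img": 0}  # miss: payload crosses IPC
assert receive(send({"img": 0}, "h0"), "h0") == {"img": 0}  # hit: only the hash crosses
assert receive(send({"img": 1}, "h1"), "h1") == {"img": 1}  # evicts h0 on both sides
assert receive(send({"img": 0}, "h0"), "h0") == {"img": 0}  # h0 is re-sent after eviction

If the two caches ever diverged (different capacity or eviction order), the lookup in receive could raise KeyError; both classes expose reset() so the caches can be cleared together.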