vllm.entrypoints.openai.generate.api_router

_check_chat_template_available

_check_chat_template_available(
    engine_client: EngineClient, chat_template: str | None
) -> bool

Check if a chat template can be resolved for this model.

Returns:

Type    Description
bool    True if chat template is available, False otherwise.
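
The helper is internal to the router, but its intended use is easy to picture. The sketch below is hypothetical wiring, not part of the documented module: `build_app` and the placeholder handlers are invented for illustration, and only the call to `_check_chat_template_available` reflects the documented API. It shows how a server-setup routine could use the result to decide whether to expose the chat endpoint while leaving /v1/completions available either way.

```python
from fastapi import FastAPI


def build_app(engine_client, chat_template: str | None) -> FastAPI:
    # Hypothetical wiring sketch: only mount /v1/chat/completions when a
    # chat template can actually be resolved for the loaded model.
    app = FastAPI()

    @app.post("/v1/completions")
    async def completions() -> dict:
        # Placeholder handler; the real endpoint delegates to the engine.
        return {"status": "ok"}

    if _check_chat_template_available(engine_client, chat_template):

        @app.post("/v1/chat/completions")
        async def chat_completions() -> dict:
            # Placeholder handler; only registered when a template resolves.
            return {"status": "ok"}

    return app
```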

Source code in vllm/entrypoints/openai/generate/api_router.py
def _check_chat_template_available(
    engine_client: "EngineClient",
    chat_template: str | None,
) -> bool:
    """
    Check if a chat template can be resolved for this model.

    Returns:
        True if chat template is available, False otherwise.
    """
    if engine_client.renderer.tokenizer is None:
        # If tokenizer is not initialized, chat template is not available.
        return False

    from vllm.entrypoints.logger import init_logger
    from vllm.renderers.hf import resolve_chat_template

    logger = init_logger(__name__)
    model_name = engine_client.model_config.model

    try:
        resolved = resolve_chat_template(
            tokenizer=engine_client.renderer.tokenizer,
            chat_template=chat_template,
            tools=None,
            model_config=engine_client.model_config,
        )

        if resolved is not None:
            return True

        # Normal case: base models without chat templates
        logger.info(
            "No chat template found for model %s. "
            "The /v1/chat/completions endpoint will be disabled. "
            "Use /v1/completions for this model, or provide a chat template "
            "via --chat-template to enable chat completions.",
            model_name,
        )
        return False

    except Exception:
        # Unexpected errors during template resolution
        logger.exception(
            "Error checking chat template availability for model %s. "
            "The /v1/chat/completions endpoint will be disabled.",
            model_name,
        )
        return False
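
For intuition about what the resolution step is checking: a HuggingFace tokenizer either ships a chat template or it does not. The standalone sketch below uses the transformers library directly, bypassing vLLM's renderer and `resolve_chat_template`, so it is only an approximation of the check above; the model IDs are examples. A base model such as gpt2 carries no template, while an instruct-tuned model typically does.

```python
from transformers import AutoTokenizer

# Base model: no chat template ships with the tokenizer, so a server using the
# check above would disable /v1/chat/completions and log the informational message.
base = AutoTokenizer.from_pretrained("gpt2")
print(base.chat_template is not None)  # False

# Instruct-tuned model: the tokenizer carries a chat template, so the chat
# endpoint can be enabled and messages rendered into a prompt string.
chat = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
print(chat.chat_template is not None)  # True
print(chat.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
))
```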