vllm.entrypoints.anthropic.protocol

Pydantic models for Anthropic API protocol

AnthropicContentBlock

Bases: BaseModel

Content block in message

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicContentBlock(BaseModel):
    """Content block in message"""

    type: Literal["text", "image", "tool_use", "tool_result"]
    text: str | None = None
    # For image content
    source: dict[str, Any] | None = None
    # For tool use/result
    id: str | None = None
    name: str | None = None
    input: dict[str, Any] | None = None
    content: str | list[dict[str, Any]] | None = None
    is_error: bool | None = None
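
A minimal usage sketch (assuming vLLM is installed and this module is importable): the same model covers plain text blocks and tool_use blocks, with unused fields left as None. The tool id, name, and input below are illustrative placeholders.

from vllm.entrypoints.anthropic.protocol import AnthropicContentBlock

# Plain text block
text_block = AnthropicContentBlock(type="text", text="Hello!")

# Tool-use block carrying the tool call id, name, and JSON input (placeholder values)
tool_block = AnthropicContentBlock(
    type="tool_use",
    id="toolu_01",
    name="get_weather",
    input={"city": "Paris"},
)

# Drop unset/None fields when serializing
print(text_block.model_dump(exclude_none=True))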

AnthropicDelta

Bases: BaseModel

Delta for streaming responses

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicDelta(BaseModel):
    """Delta for streaming responses"""

    type: Literal["text_delta", "input_json_delta"] | None = None
    text: str | None = None
    partial_json: str | None = None

    # Message delta
    stop_reason: (
        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"] | None
    ) = None
    stop_sequence: str | None = None
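
A short sketch of the two delta shapes used while streaming: a text_delta that appends output text to a content block, and a final message-level delta that carries only the stop reason (values here are illustrative).

from vllm.entrypoints.anthropic.protocol import AnthropicDelta

# Incremental text for a content_block_delta event
text_delta = AnthropicDelta(type="text_delta", text="Hel")

# Final message_delta payload carrying only the stop reason
final_delta = AnthropicDelta(stop_reason="end_turn")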

AnthropicError

Bases: BaseModel

Error structure for Anthropic API

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicError(BaseModel):
    """Error structure for Anthropic API"""

    type: str
    message: str

AnthropicErrorResponse

Bases: BaseModel

Error response structure for Anthropic API

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicErrorResponse(BaseModel):
    """Error response structure for Anthropic API"""

    type: Literal["error"] = "error"
    error: AnthropicError
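
As a sketch, an error payload wraps an AnthropicError inside an AnthropicErrorResponse; the error type string and message below are illustrative.

from vllm.entrypoints.anthropic.protocol import (
    AnthropicError,
    AnthropicErrorResponse,
)

resp = AnthropicErrorResponse(
    error=AnthropicError(
        type="invalid_request_error",        # illustrative error type
        message="max_tokens must be positive",
    )
)
print(resp.model_dump_json())  # {"type":"error","error":{...}}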

AnthropicMessage

Bases: BaseModel

Message structure

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicMessage(BaseModel):
    """Message structure"""

    role: Literal["user", "assistant"]
    content: str | list[AnthropicContentBlock]
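
Content can be either a plain string or a list of content blocks; a brief sketch with placeholder text:

from vllm.entrypoints.anthropic.protocol import (
    AnthropicContentBlock,
    AnthropicMessage,
)

# String content
user_msg = AnthropicMessage(role="user", content="What is the weather?")

# Structured content blocks
assistant_msg = AnthropicMessage(
    role="assistant",
    content=[AnthropicContentBlock(type="text", text="Let me check.")],
)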

AnthropicMessagesRequest

Bases: BaseModel

Anthropic Messages API request

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicMessagesRequest(BaseModel):
    """Anthropic Messages API request"""

    model: str
    messages: list[AnthropicMessage]
    max_tokens: int
    metadata: dict[str, Any] | None = None
    stop_sequences: list[str] | None = None
    stream: bool | None = False
    system: str | list[AnthropicContentBlock] | None = None
    temperature: float | None = None
    tool_choice: AnthropicToolChoice | None = None
    tools: list[AnthropicTool] | None = None
    top_k: int | None = None
    top_p: float | None = None

    @field_validator("model")
    @classmethod
    def validate_model(cls, v):
        if not v:
            raise ValueError("Model is required")
        return v

    @field_validator("max_tokens")
    @classmethod
    def validate_max_tokens(cls, v):
        if v <= 0:
            raise ValueError("max_tokens must be positive")
        return v
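
An end-to-end sketch of building a request; the model name is a placeholder, and the field validators reject an empty model or a non-positive max_tokens (Pydantic surfaces the ValueError as a ValidationError).

from pydantic import ValidationError

from vllm.entrypoints.anthropic.protocol import (
    AnthropicMessage,
    AnthropicMessagesRequest,
)

request = AnthropicMessagesRequest(
    model="my-served-model",   # placeholder model name
    max_tokens=256,
    messages=[AnthropicMessage(role="user", content="Hello")],
    temperature=0.7,
    stream=True,
)

try:
    AnthropicMessagesRequest(model="my-served-model", max_tokens=0, messages=[])
except ValidationError as exc:
    print(exc)  # max_tokens must be positive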

AnthropicMessagesResponse

Bases: BaseModel

Anthropic Messages API response

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicMessagesResponse(BaseModel):
    """Anthropic Messages API response"""

    id: str
    type: Literal["message"] = "message"
    role: Literal["assistant"] = "assistant"
    content: list[AnthropicContentBlock]
    model: str
    stop_reason: (
        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"] | None
    ) = None
    stop_sequence: str | None = None
    usage: AnthropicUsage | None = None

    def model_post_init(self, __context):
        if not self.id:
            self.id = f"msg_{int(time.time() * 1000)}"
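
A sketch showing that an empty id is replaced with a time-based "msg_" identifier in model_post_init; the content, model name, and token counts are illustrative.

from vllm.entrypoints.anthropic.protocol import (
    AnthropicContentBlock,
    AnthropicMessagesResponse,
    AnthropicUsage,
)

response = AnthropicMessagesResponse(
    id="",  # empty id is replaced in model_post_init
    content=[AnthropicContentBlock(type="text", text="Hi there!")],
    model="my-served-model",  # placeholder model name
    stop_reason="end_turn",
    usage=AnthropicUsage(input_tokens=12, output_tokens=5),
)
print(response.id)  # e.g. msg_1712345678901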

AnthropicStreamEvent

Bases: BaseModel

Streaming event

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicStreamEvent(BaseModel):
    """Streaming event"""

    type: Literal[
        "message_start",
        "message_delta",
        "message_stop",
        "content_block_start",
        "content_block_delta",
        "content_block_stop",
        "ping",
        "error",
    ]
    message: "AnthropicMessagesResponse | None" = None
    delta: AnthropicDelta | None = None
    content_block: AnthropicContentBlock | None = None
    index: int | None = None
    error: AnthropicError | None = None
    usage: AnthropicUsage | None = None
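
A sketch of one content_block_delta event from a streaming response; the index ties the delta to the content block it extends, and the print line only illustrates how such an event is typically rendered as a server-sent event.

from vllm.entrypoints.anthropic.protocol import (
    AnthropicDelta,
    AnthropicStreamEvent,
)

event = AnthropicStreamEvent(
    type="content_block_delta",
    index=0,
    delta=AnthropicDelta(type="text_delta", text="Hello"),
)

# Illustrative server-sent-event rendering of the event
print(f"event: {event.type}\ndata: {event.model_dump_json(exclude_none=True)}\n")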

AnthropicTool

Bases: BaseModel

Tool definition

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicTool(BaseModel):
    """Tool definition"""

    name: str
    description: str | None = None
    input_schema: dict[str, Any]

    @field_validator("input_schema")
    @classmethod
    def validate_input_schema(cls, v):
        if not isinstance(v, dict):
            raise ValueError("input_schema must be a dictionary")
        if "type" not in v:
            v["type"] = "object"  # Default to object type
        return v
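
A sketch showing the input_schema validator filling in a default "type": "object" when the schema omits it; the tool name and schema are illustrative.

from vllm.entrypoints.anthropic.protocol import AnthropicTool

tool = AnthropicTool(
    name="get_weather",  # placeholder tool name
    description="Look up current weather for a city",
    input_schema={"properties": {"city": {"type": "string"}}},
)
print(tool.input_schema["type"])  # "object", added by the validator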

AnthropicToolChoice

Bases: BaseModel

Tool Choice definition

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicToolChoice(BaseModel):
    """Tool Choice definition"""

    type: Literal["auto", "any", "tool"]
    name: str | None = None
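
A brief sketch: "auto" and "any" need no name, while type="tool" pins a specific tool by name (hypothetical name below).

from vllm.entrypoints.anthropic.protocol import AnthropicToolChoice

auto_choice = AnthropicToolChoice(type="auto")
forced_choice = AnthropicToolChoice(type="tool", name="get_weather")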

AnthropicUsage

Bases: BaseModel

Token usage information

Source code in vllm/entrypoints/anthropic/protocol.py
class AnthropicUsage(BaseModel):
    """Token usage information"""

    input_tokens: int
    output_tokens: int
    cache_creation_input_tokens: int | None = None
    cache_read_input_tokens: int | None = None
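
A minimal sketch with illustrative counts; the cache token counters are optional and default to None, so they drop out of a serialized payload when unset.

from vllm.entrypoints.anthropic.protocol import AnthropicUsage

usage = AnthropicUsage(input_tokens=128, output_tokens=42)
print(usage.model_dump(exclude_none=True))  # cache fields omitted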