Files
evo-ai/.venv/lib/python3.10/site-packages/litellm/llms/anthropic/cost_calculation.py
2025-04-25 15:30:54 -03:00

26 lines
763 B
Python

"""
Helper util for handling anthropic-specific cost calculation
- e.g.: prompt caching
"""
from typing import Tuple
from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
from litellm.types.utils import Usage
def cost_per_token(model: str, usage: Usage) -> Tuple[float, float]:
    """
    Compute USD costs for the prompt and completion of an Anthropic call.

    Delegates to the shared generic calculator with the provider pinned to
    "anthropic", so provider-specific pricing (e.g. prompt caching) is
    resolved there.

    Args:
        model: Model name without the provider prefix.
        usage: LiteLLM ``Usage`` block, including anthropic caching info.

    Returns:
        Tuple of ``(prompt_cost_in_usd, completion_cost_in_usd)``.
    """
    prompt_cost, completion_cost = generic_cost_per_token(
        model=model,
        usage=usage,
        custom_llm_provider="anthropic",
    )
    return prompt_cost, completion_cost