evo-ai/.venv/lib/python3.10/site-packages/litellm/proxy/post_call_rules.py

def post_response_rule(input):  # receives the model response
    print(f"post_response_rule:input={input}")  # noqa
    if len(input) < 200:
        return {
            "decision": False,
            "message": "This violates LiteLLM Proxy Rules. Response too short",
        }
    return {"decision": True}  # message not required since the request will pass