structure saas with tools
(4 binary files changed; not shown)
@@ -0,0 +1,600 @@
from typing import TYPE_CHECKING, Any, Callable, Optional, Tuple, Union, cast

import aiohttp
import httpx  # type: ignore
from aiohttp import ClientSession, FormData

import litellm
import litellm.litellm_core_utils
import litellm.types
import litellm.types.utils
from litellm.llms.base_llm.chat.transformation import BaseConfig
from litellm.llms.base_llm.image_variations.transformation import (
    BaseImageVariationConfig,
)
from litellm.llms.custom_httpx.http_handler import (
    AsyncHTTPHandler,
    HTTPHandler,
    _get_httpx_client,
)
from litellm.types.llms.openai import FileTypes
from litellm.types.utils import HttpHandlerRequestFields, ImageResponse, LlmProviders
from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager

if TYPE_CHECKING:
    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj

    LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
    LiteLLMLoggingObj = Any

DEFAULT_TIMEOUT = 600


class BaseLLMAIOHTTPHandler:
    def __init__(self):
        self.client_session: Optional[aiohttp.ClientSession] = None

    def _get_async_client_session(
        self, dynamic_client_session: Optional[ClientSession] = None
    ) -> ClientSession:
        if dynamic_client_session:
            return dynamic_client_session
        elif self.client_session:
            return self.client_session
        else:
            # init client session, and then return new session
            self.client_session = aiohttp.ClientSession()
            return self.client_session

    async def _make_common_async_call(
        self,
        async_client_session: Optional[ClientSession],
        provider_config: BaseConfig,
        api_base: str,
        headers: dict,
        data: Optional[dict],
        timeout: Union[float, httpx.Timeout],
        litellm_params: dict,
        form_data: Optional[FormData] = None,
        stream: bool = False,
    ) -> aiohttp.ClientResponse:
        """Common implementation across stream + non-stream calls. Meant to ensure consistent error-handling."""
        max_retry_on_unprocessable_entity_error = (
            provider_config.max_retry_on_unprocessable_entity_error
        )

        response: Optional[aiohttp.ClientResponse] = None
        async_client_session = self._get_async_client_session(
            dynamic_client_session=async_client_session
        )

        for i in range(max(max_retry_on_unprocessable_entity_error, 1)):
            try:
                response = await async_client_session.post(
                    url=api_base,
                    headers=headers,
                    json=data,
                    data=form_data,
                )
                if not response.ok:
                    response.raise_for_status()
            except aiohttp.ClientResponseError as e:
                setattr(e, "text", e.message)
                raise self._handle_error(e=e, provider_config=provider_config)
            except Exception as e:
                raise self._handle_error(e=e, provider_config=provider_config)
            break

        if response is None:
            raise provider_config.get_error_class(
                error_message="No response from the API",
                status_code=422,
                headers={},
            )

        return response

    def _make_common_sync_call(
        self,
        sync_httpx_client: HTTPHandler,
        provider_config: BaseConfig,
        api_base: str,
        headers: dict,
        data: dict,
        timeout: Union[float, httpx.Timeout],
        litellm_params: dict,
        stream: bool = False,
        files: Optional[dict] = None,
        content: Any = None,
        params: Optional[dict] = None,
    ) -> httpx.Response:
        max_retry_on_unprocessable_entity_error = (
            provider_config.max_retry_on_unprocessable_entity_error
        )

        response: Optional[httpx.Response] = None

        for i in range(max(max_retry_on_unprocessable_entity_error, 1)):
            try:
                response = sync_httpx_client.post(
                    url=api_base,
                    headers=headers,
                    data=data,  # do not json dump the data here. let the individual endpoint handle this.
                    timeout=timeout,
                    stream=stream,
                    files=files,
                    content=content,
                    params=params,
                )
            except httpx.HTTPStatusError as e:
                hit_max_retry = i + 1 == max_retry_on_unprocessable_entity_error
                should_retry = provider_config.should_retry_llm_api_inside_llm_translation_on_http_error(
                    e=e, litellm_params=litellm_params
                )
                if should_retry and not hit_max_retry:
                    data = (
                        provider_config.transform_request_on_unprocessable_entity_error(
                            e=e, request_data=data
                        )
                    )
                    continue
                else:
                    raise self._handle_error(e=e, provider_config=provider_config)
            except Exception as e:
                raise self._handle_error(e=e, provider_config=provider_config)
            break

        if response is None:
            raise provider_config.get_error_class(
                error_message="No response from the API",
                status_code=422,  # don't retry on this error
                headers={},
            )

        return response

    async def async_completion(
        self,
        custom_llm_provider: str,
        provider_config: BaseConfig,
        api_base: str,
        headers: dict,
        data: dict,
        timeout: Union[float, httpx.Timeout],
        model: str,
        model_response: ModelResponse,
        logging_obj: LiteLLMLoggingObj,
        messages: list,
        optional_params: dict,
        litellm_params: dict,
        encoding: Any,
        api_key: Optional[str] = None,
        client: Optional[ClientSession] = None,
    ):
        _response = await self._make_common_async_call(
            async_client_session=client,
            provider_config=provider_config,
            api_base=api_base,
            headers=headers,
            data=data,
            timeout=timeout,
            litellm_params=litellm_params,
            stream=False,
        )
        _transformed_response = await provider_config.transform_response(  # type: ignore
            model=model,
            raw_response=_response,  # type: ignore
            model_response=model_response,
            logging_obj=logging_obj,
            api_key=api_key,
            request_data=data,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
            encoding=encoding,
        )
        return _transformed_response

    def completion(
        self,
        model: str,
        messages: list,
        api_base: str,
        custom_llm_provider: str,
        model_response: ModelResponse,
        encoding,
        logging_obj: LiteLLMLoggingObj,
        optional_params: dict,
        timeout: Union[float, httpx.Timeout],
        litellm_params: dict,
        acompletion: bool,
        stream: Optional[bool] = False,
        fake_stream: bool = False,
        api_key: Optional[str] = None,
        headers: Optional[dict] = None,  # avoid a mutable `{}` default; the body falls back to `headers or {}`
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler, ClientSession]] = None,
    ):
        provider_config = ProviderConfigManager.get_provider_chat_config(
            model=model, provider=litellm.LlmProviders(custom_llm_provider)
        )
        if provider_config is None:
            raise ValueError(
                f"Provider config not found for model: {model} and provider: {custom_llm_provider}"
            )
        # get config from model, custom llm provider
        headers = provider_config.validate_environment(
            api_key=api_key,
            headers=headers or {},
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
            api_base=api_base,
        )

        api_base = provider_config.get_complete_url(
            api_base=api_base,
            api_key=api_key,
            model=model,
            optional_params=optional_params,
            litellm_params=litellm_params,
            stream=stream,
        )

        data = provider_config.transform_request(
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
            headers=headers,
        )

        ## LOGGING
        logging_obj.pre_call(
            input=messages,
            api_key=api_key,
            additional_args={
                "complete_input_dict": data,
                "api_base": api_base,
                "headers": headers,
            },
        )

        if acompletion is True:
            return self.async_completion(
                custom_llm_provider=custom_llm_provider,
                provider_config=provider_config,
                api_base=api_base,
                headers=headers,
                data=data,
                timeout=timeout,
                model=model,
                model_response=model_response,
                logging_obj=logging_obj,
                api_key=api_key,
                messages=messages,
                optional_params=optional_params,
                litellm_params=litellm_params,
                encoding=encoding,
                client=(
                    client
                    if client is not None and isinstance(client, ClientSession)
                    else None
                ),
            )

        if stream is True:
            if fake_stream is not True:
                data["stream"] = stream
            completion_stream, headers = self.make_sync_call(
                provider_config=provider_config,
                api_base=api_base,
                headers=headers,  # type: ignore
                data=data,
                model=model,
                messages=messages,
                logging_obj=logging_obj,
                timeout=timeout,
                fake_stream=fake_stream,
                client=(
                    client
                    if client is not None and isinstance(client, HTTPHandler)
                    else None
                ),
                litellm_params=litellm_params,
            )
            return CustomStreamWrapper(
                completion_stream=completion_stream,
                model=model,
                custom_llm_provider=custom_llm_provider,
                logging_obj=logging_obj,
            )

        if client is None or not isinstance(client, HTTPHandler):
            sync_httpx_client = _get_httpx_client()
        else:
            sync_httpx_client = client

        response = self._make_common_sync_call(
            sync_httpx_client=sync_httpx_client,
            provider_config=provider_config,
            api_base=api_base,
            headers=headers,
            timeout=timeout,
            litellm_params=litellm_params,
            data=data,
        )
        return provider_config.transform_response(
            model=model,
            raw_response=response,
            model_response=model_response,
            logging_obj=logging_obj,
            api_key=api_key,
            request_data=data,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
            encoding=encoding,
        )

    def make_sync_call(
        self,
        provider_config: BaseConfig,
        api_base: str,
        headers: dict,
        data: dict,
        model: str,
        messages: list,
        logging_obj,
        litellm_params: dict,
        timeout: Union[float, httpx.Timeout],
        fake_stream: bool = False,
        client: Optional[HTTPHandler] = None,
    ) -> Tuple[Any, dict]:
        if client is None or not isinstance(client, HTTPHandler):
            sync_httpx_client = _get_httpx_client()
        else:
            sync_httpx_client = client
        stream = True
        if fake_stream is True:
            stream = False

        response = self._make_common_sync_call(
            sync_httpx_client=sync_httpx_client,
            provider_config=provider_config,
            api_base=api_base,
            headers=headers,
            data=data,
            timeout=timeout,
            litellm_params=litellm_params,
            stream=stream,
        )

        if fake_stream is True:
            completion_stream = provider_config.get_model_response_iterator(
                streaming_response=response.json(), sync_stream=True
            )
        else:
            completion_stream = provider_config.get_model_response_iterator(
                streaming_response=response.iter_lines(), sync_stream=True
            )

        # LOGGING
        logging_obj.post_call(
            input=messages,
            api_key="",
            original_response="first stream response received",
            additional_args={"complete_input_dict": data},
        )

        return completion_stream, dict(response.headers)

    async def async_image_variations(
        self,
        client: Optional[ClientSession],
        provider_config: BaseImageVariationConfig,
        api_base: str,
        headers: dict,
        data: HttpHandlerRequestFields,
        timeout: float,
        litellm_params: dict,
        model_response: ImageResponse,
        logging_obj: LiteLLMLoggingObj,
        api_key: str,
        model: Optional[str],
        image: FileTypes,
        optional_params: dict,
    ) -> ImageResponse:
        # create aiohttp form data if files in data
        form_data: Optional[FormData] = None
        if "files" in data and "data" in data:
            form_data = FormData()
            for k, v in data["files"].items():
                form_data.add_field(k, v[1], filename=v[0], content_type=v[2])

            for key, value in data["data"].items():
                form_data.add_field(key, value)

        _response = await self._make_common_async_call(
            async_client_session=client,
            provider_config=provider_config,
            api_base=api_base,
            headers=headers,
            data=None if form_data is not None else cast(dict, data),
            form_data=form_data,
            timeout=timeout,
            litellm_params=litellm_params,
            stream=False,
        )

        ## LOGGING
        logging_obj.post_call(
            api_key=api_key,
            original_response=_response.text,
            additional_args={
                "headers": headers,
                "api_base": api_base,
            },
        )

        ## RESPONSE OBJECT
        return await provider_config.async_transform_response_image_variation(
            model=model,
            model_response=model_response,
            raw_response=_response,
            logging_obj=logging_obj,
            request_data=cast(dict, data),
            image=image,
            optional_params=optional_params,
            litellm_params=litellm_params,
            encoding=None,
            api_key=api_key,
        )

    def image_variations(
        self,
        model_response: ImageResponse,
        api_key: str,
        model: Optional[str],
        image: FileTypes,
        timeout: float,
        custom_llm_provider: str,
        logging_obj: LiteLLMLoggingObj,
        optional_params: dict,
        litellm_params: dict,
        print_verbose: Optional[Callable] = None,
        api_base: Optional[str] = None,
        aimage_variation: bool = False,
        logger_fn=None,
        client=None,
        organization: Optional[str] = None,
        headers: Optional[dict] = None,
    ) -> ImageResponse:
        if model is None:
            raise ValueError("model is required for non-openai image variations")

        provider_config = ProviderConfigManager.get_provider_image_variation_config(
            model=model,  # openai defaults to dall-e-2
            provider=LlmProviders(custom_llm_provider),
        )

        if provider_config is None:
            raise ValueError(
                f"image variation provider not found: {custom_llm_provider}."
            )

        api_base = provider_config.get_complete_url(
            api_base=api_base,
            api_key=api_key,
            model=model,
            optional_params=optional_params,
            litellm_params=litellm_params,
            stream=False,
        )

        headers = provider_config.validate_environment(
            api_key=api_key,
            headers=headers or {},
            model=model,
            messages=[{"role": "user", "content": "test"}],
            optional_params=optional_params,
            litellm_params=litellm_params,
            api_base=api_base,
        )

        data = provider_config.transform_request_image_variation(
            model=model,
            image=image,
            optional_params=optional_params,
            headers=headers,
        )

        ## LOGGING
        logging_obj.pre_call(
            input="",
            api_key=api_key,
            additional_args={
                "headers": headers,
                "api_base": api_base,
                "complete_input_dict": data.copy(),
            },
        )

        if litellm_params.get("async_call", False):
            return self.async_image_variations(
                api_base=api_base,
                data=data,
                headers=headers,
                model_response=model_response,
                logging_obj=logging_obj,
                api_key=api_key,  # required by async_image_variations; was missing from this call
                model=model,
                timeout=timeout,
                client=client,
                optional_params=optional_params,
                litellm_params=litellm_params,
                image=image,
                provider_config=provider_config,
            )  # type: ignore

        if client is None or not isinstance(client, HTTPHandler):
            sync_httpx_client = _get_httpx_client()
        else:
            sync_httpx_client = client

        response = self._make_common_sync_call(
            sync_httpx_client=sync_httpx_client,
            provider_config=provider_config,
            api_base=api_base,
            headers=headers,
            timeout=timeout,
            litellm_params=litellm_params,
            stream=False,
            data=data.get("data") or {},
            files=data.get("files"),
            content=data.get("content"),
            params=data.get("params"),
        )

        ## LOGGING
        logging_obj.post_call(
            api_key=api_key,
            original_response=response.text,
            additional_args={
                "headers": headers,
                "api_base": api_base,
            },
        )

        ## RESPONSE OBJECT
        return provider_config.transform_response_image_variation(
            model=model,
            model_response=model_response,
            raw_response=response,
            logging_obj=logging_obj,
            request_data=cast(dict, data),
            image=image,
            optional_params=optional_params,
            litellm_params=litellm_params,
            encoding=None,
            api_key=api_key,
        )

    def _handle_error(self, e: Exception, provider_config: BaseConfig):
        status_code = getattr(e, "status_code", 500)
        error_headers = getattr(e, "headers", None)
        error_text = getattr(e, "text", str(e))
        error_response = getattr(e, "response", None)
        if error_headers is None and error_response:
            error_headers = getattr(error_response, "headers", None)
        if error_response and hasattr(error_response, "text"):
            error_text = getattr(error_response, "text", error_text)
        if error_headers:
            error_headers = dict(error_headers)
        else:
            error_headers = {}
        raise provider_config.get_error_class(
            error_message=error_text,
            status_code=status_code,
            headers=error_headers,
        )

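The retry loop in `_make_common_sync_call` above is the one piece of non-obvious control flow in this file: on an HTTP error the provider config may decide to rewrite the request body and retry, otherwise the error is normalized through `_handle_error`. A minimal standalone sketch of that pattern follows; `send_fn`, `should_retry`, and `transform_request` are illustrative stand-ins, not litellm APIs:

def post_with_retry_on_422(
    data: dict,
    send_fn,            # callable that performs the POST and may raise
    should_retry,       # callable(exc) -> bool
    transform_request,  # callable(exc, data) -> rewritten request body
    max_retries: int = 1,
):
    response = None
    for i in range(max(max_retries, 1)):
        try:
            response = send_fn(data)
        except Exception as e:
            hit_max_retry = i + 1 == max_retries
            if should_retry(e) and not hit_max_retry:
                # rewrite the request body and try again
                data = transform_request(e, data)
                continue
            raise
        break  # success: leave the loop without retrying
    if response is None:
        raise RuntimeError("No response from the API")
    return response
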
@@ -0,0 +1,784 @@
import asyncio
import os
import ssl
import time
from typing import TYPE_CHECKING, Any, Callable, List, Mapping, Optional, Union

import httpx
from httpx import USE_CLIENT_DEFAULT, AsyncHTTPTransport, HTTPTransport

import litellm
from litellm.constants import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
from litellm.litellm_core_utils.logging_utils import track_llm_api_timing
from litellm.types.llms.custom_http import *

if TYPE_CHECKING:
    from litellm import LlmProviders
    from litellm.litellm_core_utils.litellm_logging import (
        Logging as LiteLLMLoggingObject,
    )
else:
    LlmProviders = Any
    LiteLLMLoggingObject = Any

try:
    from litellm._version import version
except Exception:
    version = "0.0.0"

headers = {
    "User-Agent": f"litellm/{version}",
}

# https://www.python-httpx.org/advanced/timeouts
_DEFAULT_TIMEOUT = httpx.Timeout(timeout=5.0, connect=5.0)


def mask_sensitive_info(error_message):
    # Find the start of the key parameter
    if isinstance(error_message, str):
        key_index = error_message.find("key=")
    else:
        return error_message

    # If key is found
    if key_index != -1:
        # Find the end of the key parameter (next & or end of string)
        next_param = error_message.find("&", key_index)

        if next_param == -1:
            # If no more parameters, mask until the end of the string
            masked_message = error_message[: key_index + 4] + "[REDACTED_API_KEY]"
        else:
            # Replace the key with redacted value, keeping other parameters
            masked_message = (
                error_message[: key_index + 4]
                + "[REDACTED_API_KEY]"
                + error_message[next_param:]
            )

        return masked_message

    return error_message

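# Example: mask_sensitive_info("https://api.example.com/v1?key=sk-123&page=2")
# returns "https://api.example.com/v1?key=[REDACTED_API_KEY]&page=2";
# non-string inputs are returned unchanged.
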
class MaskedHTTPStatusError(httpx.HTTPStatusError):
    def __init__(
        self, original_error, message: Optional[str] = None, text: Optional[str] = None
    ):
        # Create a new error with the masked URL
        masked_url = mask_sensitive_info(str(original_error.request.url))
        # Create a new error that looks like the original, but with a masked URL

        super().__init__(
            message=original_error.message,
            request=httpx.Request(
                method=original_error.request.method,
                url=masked_url,
                headers=original_error.request.headers,
                content=original_error.request.content,
            ),
            response=httpx.Response(
                status_code=original_error.response.status_code,
                content=original_error.response.content,
                headers=original_error.response.headers,
            ),
        )
        self.message = message
        self.text = text


class AsyncHTTPHandler:
    def __init__(
        self,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]] = None,
        concurrent_limit=1000,
        client_alias: Optional[str] = None,  # name for client in logs
        ssl_verify: Optional[VerifyTypes] = None,
    ):
        self.timeout = timeout
        self.event_hooks = event_hooks
        self.client = self.create_client(
            timeout=timeout,
            concurrent_limit=concurrent_limit,
            event_hooks=event_hooks,
            ssl_verify=ssl_verify,
        )
        self.client_alias = client_alias

    def create_client(
        self,
        timeout: Optional[Union[float, httpx.Timeout]],
        concurrent_limit: int,
        event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]],
        ssl_verify: Optional[VerifyTypes] = None,
    ) -> httpx.AsyncClient:
        # SSL certificates (a.k.a CA bundle) used to verify the identity of requested hosts.
        # /path/to/certificate.pem
        if ssl_verify is None:
            ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify)

        ssl_security_level = os.getenv("SSL_SECURITY_LEVEL")

        # If ssl_verify is not False and we need a lower security level
        if (
            not ssl_verify
            and ssl_security_level
            and isinstance(ssl_security_level, str)
        ):
            # Create a custom SSL context with reduced security level
            custom_ssl_context = ssl.create_default_context()
            custom_ssl_context.set_ciphers(ssl_security_level)

            # If ssl_verify is a path to a CA bundle, load it into our custom context
            if isinstance(ssl_verify, str) and os.path.exists(ssl_verify):
                custom_ssl_context.load_verify_locations(cafile=ssl_verify)

            # Use our custom SSL context instead of the original ssl_verify value
            ssl_verify = custom_ssl_context

        # An SSL certificate used by the requested host to authenticate the client.
        # /path/to/client.pem
        cert = os.getenv("SSL_CERTIFICATE", litellm.ssl_certificate)

        if timeout is None:
            timeout = _DEFAULT_TIMEOUT
        # Create a client with a connection pool
        transport = self._create_async_transport()

        return httpx.AsyncClient(
            transport=transport,
            event_hooks=event_hooks,
            timeout=timeout,
            limits=httpx.Limits(
                max_connections=concurrent_limit,
                max_keepalive_connections=concurrent_limit,
            ),
            verify=ssl_verify,
            cert=cert,
            headers=headers,
        )

    async def close(self):
        # Close the client when you're done with it
        await self.client.aclose()

    async def __aenter__(self):
        return self.client

    async def __aexit__(self, exc_type, exc_value, traceback):
        # close the client when exiting; accepts the standard context-manager arguments
        await self.client.aclose()

    async def get(
        self,
        url: str,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        follow_redirects: Optional[bool] = None,
    ):
        # Set follow_redirects to UseClientDefault if None
        _follow_redirects = (
            follow_redirects if follow_redirects is not None else USE_CLIENT_DEFAULT
        )

        response = await self.client.get(
            url, params=params, headers=headers, follow_redirects=_follow_redirects  # type: ignore
        )
        return response

    @track_llm_api_timing()
    async def post(
        self,
        url: str,
        data: Optional[Union[dict, str, bytes]] = None,  # type: ignore
        json: Optional[dict] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        stream: bool = False,
        logging_obj: Optional[LiteLLMLoggingObject] = None,
    ):
        start_time = time.time()
        try:
            if timeout is None:
                timeout = self.timeout

            req = self.client.build_request(
                "POST", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
            )
            response = await self.client.send(req, stream=stream)
            response.raise_for_status()
            return response
        except (httpx.RemoteProtocolError, httpx.ConnectError):
            # Retry the request with a new session if there is a connection error
            new_client = self.create_client(
                timeout=timeout, concurrent_limit=1, event_hooks=self.event_hooks
            )
            try:
                return await self.single_connection_post_request(
                    url=url,
                    client=new_client,
                    data=data,
                    json=json,
                    params=params,
                    headers=headers,
                    stream=stream,
                )
            finally:
                await new_client.aclose()
        except httpx.TimeoutException as e:
            end_time = time.time()
            time_delta = round(end_time - start_time, 3)
            headers = {}
            error_response = getattr(e, "response", None)
            if error_response is not None:
                for key, value in error_response.headers.items():
                    headers["response_headers-{}".format(key)] = value

            raise litellm.Timeout(
                message=f"Connection timed out. Timeout passed={timeout}, time taken={time_delta} seconds",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
                headers=headers,
            )
        except httpx.HTTPStatusError as e:
            if stream is True:
                setattr(e, "message", await e.response.aread())
                setattr(e, "text", await e.response.aread())
            else:
                setattr(e, "message", mask_sensitive_info(e.response.text))
                setattr(e, "text", mask_sensitive_info(e.response.text))

            setattr(e, "status_code", e.response.status_code)

            raise e
        except Exception as e:
            raise e

    async def put(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,  # type: ignore
        json: Optional[dict] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        stream: bool = False,
    ):
        try:
            if timeout is None:
                timeout = self.timeout

            req = self.client.build_request(
                "PUT", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
            )
            response = await self.client.send(req)
            response.raise_for_status()
            return response
        except (httpx.RemoteProtocolError, httpx.ConnectError):
            # Retry the request with a new session if there is a connection error
            new_client = self.create_client(
                timeout=timeout, concurrent_limit=1, event_hooks=self.event_hooks
            )
            try:
                return await self.single_connection_post_request(
                    url=url,
                    client=new_client,
                    data=data,
                    json=json,
                    params=params,
                    headers=headers,
                    stream=stream,
                )
            finally:
                await new_client.aclose()
        except httpx.TimeoutException as e:
            headers = {}
            error_response = getattr(e, "response", None)
            if error_response is not None:
                for key, value in error_response.headers.items():
                    headers["response_headers-{}".format(key)] = value

            raise litellm.Timeout(
                message=f"Connection timed out after {timeout} seconds.",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
                headers=headers,
            )
        except httpx.HTTPStatusError as e:
            setattr(e, "status_code", e.response.status_code)
            if stream is True:
                setattr(e, "message", await e.response.aread())
            else:
                setattr(e, "message", e.response.text)
            raise e
        except Exception as e:
            raise e

    async def patch(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,  # type: ignore
        json: Optional[dict] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        stream: bool = False,
    ):
        try:
            if timeout is None:
                timeout = self.timeout

            req = self.client.build_request(
                "PATCH", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
            )
            response = await self.client.send(req)
            response.raise_for_status()
            return response
        except (httpx.RemoteProtocolError, httpx.ConnectError):
            # Retry the request with a new session if there is a connection error
            new_client = self.create_client(
                timeout=timeout, concurrent_limit=1, event_hooks=self.event_hooks
            )
            try:
                return await self.single_connection_post_request(
                    url=url,
                    client=new_client,
                    data=data,
                    json=json,
                    params=params,
                    headers=headers,
                    stream=stream,
                )
            finally:
                await new_client.aclose()
        except httpx.TimeoutException as e:
            headers = {}
            error_response = getattr(e, "response", None)
            if error_response is not None:
                for key, value in error_response.headers.items():
                    headers["response_headers-{}".format(key)] = value

            raise litellm.Timeout(
                message=f"Connection timed out after {timeout} seconds.",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
                headers=headers,
            )
        except httpx.HTTPStatusError as e:
            setattr(e, "status_code", e.response.status_code)
            if stream is True:
                setattr(e, "message", await e.response.aread())
            else:
                setattr(e, "message", e.response.text)
            raise e
        except Exception as e:
            raise e

    async def delete(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,  # type: ignore
        json: Optional[dict] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        stream: bool = False,
    ):
        try:
            if timeout is None:
                timeout = self.timeout
            req = self.client.build_request(
                "DELETE", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
            )
            response = await self.client.send(req, stream=stream)
            response.raise_for_status()
            return response
        except (httpx.RemoteProtocolError, httpx.ConnectError):
            # Retry the request with a new session if there is a connection error
            new_client = self.create_client(
                timeout=timeout, concurrent_limit=1, event_hooks=self.event_hooks
            )
            try:
                return await self.single_connection_post_request(
                    url=url,
                    client=new_client,
                    data=data,
                    json=json,
                    params=params,
                    headers=headers,
                    stream=stream,
                )
            finally:
                await new_client.aclose()
        except httpx.HTTPStatusError as e:
            setattr(e, "status_code", e.response.status_code)
            if stream is True:
                setattr(e, "message", await e.response.aread())
            else:
                setattr(e, "message", e.response.text)
            raise e
        except Exception as e:
            raise e

    async def single_connection_post_request(
        self,
        url: str,
        client: httpx.AsyncClient,
        data: Optional[Union[dict, str, bytes]] = None,  # type: ignore
        json: Optional[dict] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        stream: bool = False,
    ):
        """
        Making POST request for a single connection client.

        Used for retrying connection client errors.
        """
        req = client.build_request(
            "POST", url, data=data, json=json, params=params, headers=headers  # type: ignore
        )
        response = await client.send(req, stream=stream)
        response.raise_for_status()
        return response

    def __del__(self) -> None:
        try:
            asyncio.get_running_loop().create_task(self.close())
        except Exception:
            pass

    def _create_async_transport(self) -> Optional[AsyncHTTPTransport]:
        """
        Create an async transport with IPv4 only if litellm.force_ipv4 is True.
        Otherwise, return None.

        Some users have seen httpx ConnectionError when using ipv6 - forcing ipv4 resolves the issue for them
        """
        if litellm.force_ipv4:
            return AsyncHTTPTransport(local_address="0.0.0.0")
        else:
            return None


class HTTPHandler:
    def __init__(
        self,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        concurrent_limit=1000,
        client: Optional[httpx.Client] = None,
        ssl_verify: Optional[Union[bool, str]] = None,
    ):
        if timeout is None:
            timeout = _DEFAULT_TIMEOUT

        # SSL certificates (a.k.a CA bundle) used to verify the identity of requested hosts.
        # /path/to/certificate.pem

        if ssl_verify is None:
            ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify)

        # An SSL certificate used by the requested host to authenticate the client.
        # /path/to/client.pem
        cert = os.getenv("SSL_CERTIFICATE", litellm.ssl_certificate)

        if client is None:
            transport = self._create_sync_transport()

            # Create a client with a connection pool
            self.client = httpx.Client(
                transport=transport,
                timeout=timeout,
                limits=httpx.Limits(
                    max_connections=concurrent_limit,
                    max_keepalive_connections=concurrent_limit,
                ),
                verify=ssl_verify,
                cert=cert,
                headers=headers,
            )
        else:
            self.client = client

    def close(self):
        # Close the client when you're done with it
        self.client.close()

    def get(
        self,
        url: str,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        follow_redirects: Optional[bool] = None,
    ):
        # Set follow_redirects to UseClientDefault if None
        _follow_redirects = (
            follow_redirects if follow_redirects is not None else USE_CLIENT_DEFAULT
        )

        response = self.client.get(
            url, params=params, headers=headers, follow_redirects=_follow_redirects  # type: ignore
        )
        return response

    def post(
        self,
        url: str,
        data: Optional[Union[dict, str, bytes]] = None,
        json: Optional[Union[dict, str, List]] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        stream: bool = False,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        files: Optional[dict] = None,
        content: Any = None,
        logging_obj: Optional[LiteLLMLoggingObject] = None,
    ):
        try:
            if timeout is not None:
                req = self.client.build_request(
                    "POST",
                    url,
                    data=data,  # type: ignore
                    json=json,
                    params=params,
                    headers=headers,
                    timeout=timeout,
                    files=files,
                    content=content,  # type: ignore
                )
            else:
                req = self.client.build_request(
                    "POST", url, data=data, json=json, params=params, headers=headers, files=files, content=content  # type: ignore
                )
            response = self.client.send(req, stream=stream)
            response.raise_for_status()
            return response
        except httpx.TimeoutException:
            raise litellm.Timeout(
                message=f"Connection timed out after {timeout} seconds.",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
            )
        except httpx.HTTPStatusError as e:
            if stream is True:
                setattr(e, "message", mask_sensitive_info(e.response.read()))
                setattr(e, "text", mask_sensitive_info(e.response.read()))
            else:
                error_text = mask_sensitive_info(e.response.text)
                setattr(e, "message", error_text)
                setattr(e, "text", error_text)

            setattr(e, "status_code", e.response.status_code)
            raise e
        except Exception as e:
            raise e

    def patch(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,
        json: Optional[Union[dict, str]] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        stream: bool = False,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
    ):
        try:
            if timeout is not None:
                req = self.client.build_request(
                    "PATCH", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
                )
            else:
                req = self.client.build_request(
                    "PATCH", url, data=data, json=json, params=params, headers=headers  # type: ignore
                )
            response = self.client.send(req, stream=stream)
            response.raise_for_status()
            return response
        except httpx.TimeoutException:
            raise litellm.Timeout(
                message=f"Connection timed out after {timeout} seconds.",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
            )
        except httpx.HTTPStatusError as e:
            if stream is True:
                setattr(e, "message", mask_sensitive_info(e.response.read()))
                setattr(e, "text", mask_sensitive_info(e.response.read()))
            else:
                error_text = mask_sensitive_info(e.response.text)
                setattr(e, "message", error_text)
                setattr(e, "text", error_text)

            setattr(e, "status_code", e.response.status_code)

            raise e
        except Exception as e:
            raise e

    def put(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,
        json: Optional[Union[dict, str]] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        stream: bool = False,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
    ):
        try:
            if timeout is not None:
                req = self.client.build_request(
                    "PUT", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
                )
            else:
                req = self.client.build_request(
                    "PUT", url, data=data, json=json, params=params, headers=headers  # type: ignore
                )
            response = self.client.send(req, stream=stream)
            # note: unlike post/patch/delete, the response is returned without raise_for_status()
            return response
        except httpx.TimeoutException:
            raise litellm.Timeout(
                message=f"Connection timed out after {timeout} seconds.",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
            )
        except Exception as e:
            raise e

    def delete(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,  # type: ignore
        json: Optional[dict] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        stream: bool = False,
    ):
        try:
            if timeout is not None:
                req = self.client.build_request(
                    "DELETE", url, data=data, json=json, params=params, headers=headers, timeout=timeout  # type: ignore
                )
            else:
                req = self.client.build_request(
                    "DELETE", url, data=data, json=json, params=params, headers=headers  # type: ignore
                )
            response = self.client.send(req, stream=stream)
            response.raise_for_status()
            return response
        except httpx.TimeoutException:
            raise litellm.Timeout(
                message=f"Connection timed out after {timeout} seconds.",
                model="default-model-name",
                llm_provider="litellm-httpx-handler",
            )
        except httpx.HTTPStatusError as e:
            if stream is True:
                setattr(e, "message", mask_sensitive_info(e.response.read()))
                setattr(e, "text", mask_sensitive_info(e.response.read()))
            else:
                error_text = mask_sensitive_info(e.response.text)
                setattr(e, "message", error_text)
                setattr(e, "text", error_text)

            setattr(e, "status_code", e.response.status_code)

            raise e
        except Exception as e:
            raise e

    def __del__(self) -> None:
        try:
            self.close()
        except Exception:
            pass

    def _create_sync_transport(self) -> Optional[HTTPTransport]:
        """
        Create an HTTP transport with IPv4 only if litellm.force_ipv4 is True.
        Otherwise, return None.

        Some users have seen httpx ConnectionError when using ipv6 - forcing ipv4 resolves the issue for them
        """
        if litellm.force_ipv4:
            return HTTPTransport(local_address="0.0.0.0")
        else:
            return None


def get_async_httpx_client(
    llm_provider: Union[LlmProviders, httpxSpecialProvider],
    params: Optional[dict] = None,
) -> AsyncHTTPHandler:
    """
    Retrieves the async HTTP client from the cache
    If not present, creates a new client

    Caches the new client and returns it.
    """
    _params_key_name = ""
    if params is not None:
        for key, value in params.items():
            try:
                _params_key_name += f"{key}_{value}"
            except Exception:
                pass

    _cache_key_name = "async_httpx_client" + _params_key_name + llm_provider
    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key_name)
    if _cached_client:
        return _cached_client

    if params is not None:
        _new_client = AsyncHTTPHandler(**params)
    else:
        _new_client = AsyncHTTPHandler(
            timeout=httpx.Timeout(timeout=600.0, connect=5.0)
        )

    litellm.in_memory_llm_clients_cache.set_cache(
        key=_cache_key_name,
        value=_new_client,
        ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
    )
    return _new_client

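# Example cache key (illustrative): get_async_httpx_client(
#     llm_provider=LlmProviders.OPENAI, params={"timeout": 600.0})
# caches under "async_httpx_client" + "timeout_600.0" + "openai"
# (the string concatenation works because LlmProviders is a str enum),
# so later calls with the same provider and params reuse the client until the TTL expires.
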
def _get_httpx_client(params: Optional[dict] = None) -> HTTPHandler:
    """
    Retrieves the HTTP client from the cache
    If not present, creates a new client

    Caches the new client and returns it.
    """
    _params_key_name = ""
    if params is not None:
        for key, value in params.items():
            try:
                _params_key_name += f"{key}_{value}"
            except Exception:
                pass

    _cache_key_name = "httpx_client" + _params_key_name

    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_cache_key_name)
    if _cached_client:
        return _cached_client

    if params is not None:
        _new_client = HTTPHandler(**params)
    else:
        _new_client = HTTPHandler(timeout=httpx.Timeout(timeout=600.0, connect=5.0))

    litellm.in_memory_llm_clients_cache.set_cache(
        key=_cache_key_name,
        value=_new_client,
        ttl=_DEFAULT_TTL_FOR_HTTPX_CLIENTS,
    )
    return _new_client

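A minimal usage sketch for the cached client getters above. It assumes litellm is importable, the import path matches this diff, and a reachable URL; the caching behavior shown follows directly from the functions above:

# Usage sketch (assumptions: litellm installed, module path as in this diff)
from litellm.llms.custom_httpx.http_handler import _get_httpx_client

client = _get_httpx_client(params={"timeout": 30.0})
resp = client.get("https://example.com")  # returns an httpx.Response
print(resp.status_code)

client2 = _get_httpx_client(params={"timeout": 30.0})
assert client2 is client  # same params -> cached client reused until the TTL expires
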
@@ -0,0 +1,49 @@
from typing import Optional, Union

import httpx

try:
    from litellm._version import version
except Exception:
    version = "0.0.0"

headers = {
    "User-Agent": f"litellm/{version}",
}


class HTTPHandler:
    def __init__(self, concurrent_limit=1000):
        # Create a client with a connection pool
        self.client = httpx.AsyncClient(
            limits=httpx.Limits(
                max_connections=concurrent_limit,
                max_keepalive_connections=concurrent_limit,
            ),
            headers=headers,
        )

    async def close(self):
        # Close the client when you're done with it
        await self.client.aclose()

    async def get(
        self, url: str, params: Optional[dict] = None, headers: Optional[dict] = None
    ):
        response = await self.client.get(url, params=params, headers=headers)
        return response

    async def post(
        self,
        url: str,
        data: Optional[Union[dict, str]] = None,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
    ):
        try:
            response = await self.client.post(
                url, data=data, params=params, headers=headers  # type: ignore
            )
            return response
        except Exception as e:
            raise e

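Note that this class shares its name with the sync HTTPHandler in the previous file but wraps an httpx.AsyncClient, so every method must be awaited. A minimal usage sketch (assumes an event loop is available and any reachable URL):

# Usage sketch for the small async handler above; the URL is illustrative
import asyncio

async def main():
    handler = HTTPHandler()
    resp = await handler.get("https://example.com")
    print(resp.status_code)
    await handler.close()

asyncio.run(main())
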
(1 file diff suppressed because it is too large)