diff --git a/.env b/.env
index 83e371ce..c79ed9a6 100644
--- a/.env
+++ b/.env
@@ -1,3 +1,14 @@
OPENAI_API_KEY=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
-POSTGRES_CONNECTION_STRING=postgresql://postgres:root@localhost:5432/google-a2a-saas
\ No newline at end of file
+POSTGRES_CONNECTION_STRING=postgresql://postgres:root@localhost:5432/google-a2a-saas
+
+TENANT_ID=45cffb85-51c8-41ed-aa8d-710970a7ce50
+KNOWLEDGE_API_URL=http://localhost:5540
+KNOWLEDGE_API_KEY=79405047-7a5e-4b18-b25a-4af149d747dc
+
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_DB=3
+REDIS_PASSWORD=
+
+LOG_LEVEL=DEBUG
\ No newline at end of file
diff --git a/src/api/__pycache__/routes.cpython-310.pyc b/src/api/__pycache__/routes.cpython-310.pyc
index 162c691b..7fbe2fc7 100644
Binary files a/src/api/__pycache__/routes.cpython-310.pyc and b/src/api/__pycache__/routes.cpython-310.pyc differ
diff --git a/src/api/routes.py b/src/api/routes.py
index 93a5a713..4ceb0112 100644
--- a/src/api/routes.py
+++ b/src/api/routes.py
@@ -9,19 +9,19 @@ from src.schemas.schemas import (
Client, ClientCreate,
Contact, ContactCreate,
Agent, AgentCreate,
- Message, MessageCreate
)
from src.services import (
client_service,
contact_service,
agent_service,
- message_service
)
from src.schemas.chat import ChatRequest, ChatResponse, ErrorResponse
from src.services.agent_runner import run_agent
-from src.core.exceptions import AgentNotFoundError, InternalServerError
+from src.core.exceptions import AgentNotFoundError
from google.adk.artifacts.in_memory_artifact_service import InMemoryArtifactService
from google.adk.sessions import DatabaseSessionService
+from google.adk.memory import InMemoryMemoryService
+
from src.config.settings import settings
router = APIRouter()
@@ -32,6 +32,7 @@ POSTGRES_CONNECTION_STRING = settings.POSTGRES_CONNECTION_STRING
# Inicializar os serviços globalmente
session_service = DatabaseSessionService(db_url=POSTGRES_CONNECTION_STRING)
artifacts_service = InMemoryArtifactService()
+memory_service = InMemoryMemoryService()
@router.post("/chat", response_model=ChatResponse, responses={
400: {"model": ErrorResponse},
@@ -46,6 +47,7 @@ async def chat(request: ChatRequest, db: Session = Depends(get_db)):
request.message,
session_service,
artifacts_service,
+ memory_service,
db
)
@@ -145,33 +147,4 @@ def update_agent(agent_id: uuid.UUID, agent: AgentCreate, db: Session = Depends(
def delete_agent(agent_id: uuid.UUID, db: Session = Depends(get_db)):
if not agent_service.delete_agent(db, agent_id):
raise HTTPException(status_code=404, detail="Agent not found")
- return {"message": "Agent deleted successfully"}
-
-# Rotas para Mensagens
-@router.post("/messages/", response_model=Message)
-def create_message(message: MessageCreate, db: Session = Depends(get_db)):
- return message_service.create_message(db, message)
-
-@router.get("/messages/{session_id}", response_model=List[Message])
-def read_messages(session_id: uuid.UUID, skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
- return message_service.get_messages_by_session(db, session_id, skip, limit)
-
-@router.get("/message/{message_id}", response_model=Message)
-def read_message(message_id: int, db: Session = Depends(get_db)):
- db_message = message_service.get_message(db, message_id)
- if db_message is None:
- raise HTTPException(status_code=404, detail="Message not found")
- return db_message
-
-@router.put("/message/{message_id}", response_model=Message)
-def update_message(message_id: int, message: MessageCreate, db: Session = Depends(get_db)):
- db_message = message_service.update_message(db, message_id, message)
- if db_message is None:
- raise HTTPException(status_code=404, detail="Message not found")
- return db_message
-
-@router.delete("/message/{message_id}")
-def delete_message(message_id: int, db: Session = Depends(get_db)):
- if not message_service.delete_message(db, message_id):
- raise HTTPException(status_code=404, detail="Message not found")
- return {"message": "Message deleted successfully"}
\ No newline at end of file
+ return {"message": "Agent deleted successfully"}
\ No newline at end of file
diff --git a/src/config/__pycache__/settings.cpython-310.pyc b/src/config/__pycache__/settings.cpython-310.pyc
index 7d671248..7689073c 100644
Binary files a/src/config/__pycache__/settings.cpython-310.pyc and b/src/config/__pycache__/settings.cpython-310.pyc differ
diff --git a/src/config/settings.py b/src/config/settings.py
index 5c97d6b1..4d5ace35 100644
--- a/src/config/settings.py
+++ b/src/config/settings.py
@@ -28,6 +28,20 @@ class Settings(BaseSettings):
LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
LOG_DIR: str = "logs"
+ # Configurações da API de Conhecimento
+ KNOWLEDGE_API_URL: str = os.getenv("KNOWLEDGE_API_URL", "http://localhost:5540")
+ KNOWLEDGE_API_KEY: str = os.getenv("KNOWLEDGE_API_KEY", "")
+ TENANT_ID: str = os.getenv("TENANT_ID", "")
+
+ # Configurações do Redis
+ REDIS_HOST: str = os.getenv("REDIS_HOST", "localhost")
+ REDIS_PORT: int = int(os.getenv("REDIS_PORT", 6379))
+ REDIS_DB: int = int(os.getenv("REDIS_DB", 0))
+ REDIS_PASSWORD: Optional[str] = os.getenv("REDIS_PASSWORD")
+
+ # TTL do cache de ferramentas em segundos (1 hora)
+ TOOLS_CACHE_TTL: int = int(os.getenv("TOOLS_CACHE_TTL", 3600))
+
class Config:
env_file = ".env"
case_sensitive = True
diff --git a/src/models/__pycache__/models.cpython-310.pyc b/src/models/__pycache__/models.cpython-310.pyc
index 442793ff..4e0bb6f9 100644
Binary files a/src/models/__pycache__/models.cpython-310.pyc and b/src/models/__pycache__/models.cpython-310.pyc differ
diff --git a/src/models/models.py b/src/models/models.py
index d8e64352..de7ed71d 100644
--- a/src/models/models.py
+++ b/src/models/models.py
@@ -25,6 +25,7 @@ class Agent(Base):
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
client_id = Column(UUID(as_uuid=True), ForeignKey("clients.id", ondelete="CASCADE"))
name = Column(String, nullable=False)
+ description = Column(Text, nullable=True)
type = Column(String, nullable=False)
model = Column(String, nullable=False)
api_key = Column(String, nullable=False)
@@ -33,13 +34,4 @@ class Agent(Base):
__table_args__ = (
CheckConstraint("type IN ('llm', 'sequential', 'parallel', 'loop')", name='check_agent_type'),
- )
-
-class Message(Base):
- __tablename__ = "messages"
-
- id = Column(BigInteger, primary_key=True, autoincrement=True)
- session_id = Column(UUID(as_uuid=True))
- sender = Column(String)
- content = Column(Text)
- created_at = Column(DateTime(timezone=True), server_default=func.now())
\ No newline at end of file
+ )
\ No newline at end of file
diff --git a/src/schemas/__pycache__/schemas.cpython-310.pyc b/src/schemas/__pycache__/schemas.cpython-310.pyc
index 0921dd92..28dba9af 100644
Binary files a/src/schemas/__pycache__/schemas.cpython-310.pyc and b/src/schemas/__pycache__/schemas.cpython-310.pyc differ
diff --git a/src/schemas/schemas.py b/src/schemas/schemas.py
index 6cd318d2..270578f5 100644
--- a/src/schemas/schemas.py
+++ b/src/schemas/schemas.py
@@ -47,19 +47,4 @@ class Agent(AgentBase):
client_id: UUID
class Config:
- from_attributes = True
-
-class MessageBase(BaseModel):
- session_id: UUID
- sender: str
- content: str
-
-class MessageCreate(MessageBase):
- pass
-
-class Message(MessageBase):
- id: int
- created_at: datetime
-
- class Config:
- from_attributes = True
\ No newline at end of file
+ from_attributes = True
\ No newline at end of file
diff --git a/src/services/__pycache__/agent_builder.cpython-310.pyc b/src/services/__pycache__/agent_builder.cpython-310.pyc
index 60ef6fea..73c61dc1 100644
Binary files a/src/services/__pycache__/agent_builder.cpython-310.pyc and b/src/services/__pycache__/agent_builder.cpython-310.pyc differ
diff --git a/src/services/__pycache__/agent_runner.cpython-310.pyc b/src/services/__pycache__/agent_runner.cpython-310.pyc
index c84a348c..8362fc0e 100644
Binary files a/src/services/__pycache__/agent_runner.cpython-310.pyc and b/src/services/__pycache__/agent_runner.cpython-310.pyc differ
diff --git a/src/services/agent_builder.py b/src/services/agent_builder.py
index 5032f3c2..416d3bec 100644
--- a/src/services/agent_builder.py
+++ b/src/services/agent_builder.py
@@ -1,6 +1,7 @@
from typing import List, Optional, Tuple
from google.adk.agents.llm_agent import LlmAgent
from google.adk.agents import SequentialAgent, ParallelAgent, LoopAgent
+from google.adk.memory import InMemoryMemoryService
from google.adk.models.lite_llm import LiteLlm
from src.utils.logger import setup_logger
from src.core.exceptions import AgentNotFoundError
@@ -9,16 +10,166 @@ from src.services.custom_tools import CustomToolBuilder
from src.services.mcp_service import MCPService
from sqlalchemy.orm import Session
from contextlib import AsyncExitStack
-
+from google.adk.agents.callback_context import CallbackContext
+from google.adk.models import LlmResponse, LlmRequest
+from typing import Optional
+import logging
+import os
+import requests
+from datetime import datetime
logger = setup_logger(__name__)
+
+def before_model_callback(
+ callback_context: CallbackContext, llm_request: LlmRequest
+) -> Optional[LlmResponse]:
+ """
+ Callback executado antes do modelo gerar uma resposta.
+ Sempre executa a busca na base de conhecimento antes de prosseguir.
+ """
+ try:
+ agent_name = callback_context.agent_name
+ logger.debug(f"🔄 Before model call for agent: {agent_name}")
+
+ # Extrai a última mensagem do usuário
+ last_user_message = ""
+ if llm_request.contents and llm_request.contents[-1].role == "user":
+ if llm_request.contents[-1].parts:
+ last_user_message = llm_request.contents[-1].parts[0].text
+ logger.debug(f"📝 Última mensagem do usuário: {last_user_message}")
+
+ # Extrai e formata o histórico de mensagens
+ history = []
+ for content in llm_request.contents:
+ if content.parts and content.parts[0].text:
+ # Substitui 'model' por 'assistant' no role
+ role = "assistant" if content.role == "model" else content.role
+ history.append(
+ {
+ "role": role,
+ "content": {
+ "type": "text",
+ "text": content.parts[0].text,
+ },
+ }
+ )
+
+ # loga o histórico de mensagens
+ logger.debug(f"📝 Histórico de mensagens: {history}")
+
+ if last_user_message:
+ logger.info("🔍 Executando busca na base de conhecimento")
+ # Executa a busca na base de conhecimento de forma síncrona
+ search_results = search_knowledge_base_function_sync(
+ last_user_message, history
+ )
+
+ if search_results:
+ logger.info("✅ Resultados encontrados, adicionando ao contexto")
+
+ # Obtém a instrução original do sistema
+ original_instruction = llm_request.config.system_instruction or ""
+
+ # Adiciona os resultados da busca e o histórico ao contexto do sistema
+ modified_text = (
+ original_instruction
+ + "\n\n\n"
+ + str(search_results)
+ + "\n\n\n\n"
+ + str(history)
+ + "\n"
+ )
+ llm_request.config.system_instruction = modified_text
+
+ logger.debug(
+ f"📝 Instrução do sistema atualizada com resultados da busca e histórico"
+ )
+ else:
+ logger.warning("⚠️ Nenhum resultado encontrado na busca")
+ else:
+ logger.warning("⚠️ Nenhuma mensagem do usuário encontrada")
+
+ logger.info("✅ Before_model_callback finalizado")
+ return None
+ except Exception as e:
+ logger.error(f"❌ Erro no before_model_callback: {str(e)}", exc_info=True)
+ return None
+
+
+def search_knowledge_base_function_sync(query: str, history=[]):
+ """
+ Search knowledge base de forma síncrona.
+
+ Args:
+ query (str): The search query, with user message and history messages, all in one string
+
+ Returns:
+ dict: The search results
+ """
+ try:
+ logger.info("🔍 Iniciando busca na base de conhecimento")
+ logger.debug(f"Query recebida: {query}")
+
+ # url = os.getenv("KNOWLEDGE_API_URL") + "/api/v1/search"
+ url = os.getenv("KNOWLEDGE_API_URL") + "/api/v1/knowledge"
+ tenant_id = os.getenv("TENANT_ID")
+ url = url + "?tenant_id=" + tenant_id
+ logger.debug(f"URL da API: {url}")
+ logger.debug(f"Tenant ID: {tenant_id}")
+
+ headers = {
+ "x-api-key": f"{os.getenv('KNOWLEDGE_API_KEY')}",
+ "Content-Type": "application/json",
+ }
+        logger.debug("Headers configurados (x-api-key omitida por segurança)")
+
+ payload = {
+ "gemini_api_key": os.getenv("GOOGLE_API_KEY"),
+ "gemini_model": "gemini-2.0-flash-lite-001",
+ "gemini_temperature": 0.7,
+ "query": query,
+ "tenant_id": tenant_id,
+ "history": history,
+ }
+
+        logger.debug(f"Payload da requisição: { {**payload, 'gemini_api_key': '***'} }")
+
+ # Usando requests para fazer a requisição síncrona com timeout
+ logger.info("🔄 Fazendo requisição síncrona para a API de conhecimento")
+ # response = requests.post(url, headers=headers, json=payload)
+ response = requests.get(url, headers=headers, timeout=10)
+
+ if response.status_code == 200:
+ logger.info("✅ Busca realizada com sucesso")
+ result = response.json()
+ logger.debug(f"Resultado da busca: {result}")
+ return result
+ else:
+ logger.error(
+ f"❌ Erro ao realizar busca. Status code: {response.status_code}"
+ )
+ return None
+ except requests.exceptions.Timeout:
+ logger.error("❌ Timeout ao realizar busca na base de conhecimento")
+ return None
+ except requests.exceptions.RequestException as e:
+ logger.error(f"❌ Erro na requisição: {str(e)}", exc_info=True)
+ return None
+ except Exception as e:
+ logger.error(f"❌ Erro ao realizar busca: {str(e)}", exc_info=True)
+ return None
+
+
class AgentBuilder:
- def __init__(self, db: Session):
+ def __init__(self, db: Session, memory_service: InMemoryMemoryService):
self.db = db
self.custom_tool_builder = CustomToolBuilder()
self.mcp_service = MCPService()
+ self.memory_service = memory_service
- async def _create_llm_agent(self, agent) -> Tuple[LlmAgent, Optional[AsyncExitStack]]:
+ async def _create_llm_agent(
+ self, agent
+ ) -> Tuple[LlmAgent, Optional[AsyncExitStack]]:
"""Cria um agente LLM a partir dos dados do agente."""
# Obtém ferramentas personalizadas da configuração
custom_tools = []
@@ -34,81 +185,129 @@ class AgentBuilder:
# Combina todas as ferramentas
all_tools = custom_tools + mcp_tools
- return LlmAgent(
- name=agent.name,
- model=LiteLlm(model=agent.model, api_key=agent.api_key),
- instruction=agent.instruction,
- description=agent.config.get("description", ""),
- tools=all_tools,
- ), mcp_exit_stack
+ # Verifica se load_memory está habilitado
+ before_model_callback_func = None
+ if agent.config.get("load_memory") == True:
+ before_model_callback_func = before_model_callback
+
+ now = datetime.now()
+ current_datetime = now.strftime("%d/%m/%Y %H:%M")
+ current_day_of_week = now.strftime("%A")
+ current_date_iso = now.strftime("%Y-%m-%d")
+ current_time = now.strftime("%H:%M")
- async def _get_sub_agents(self, sub_agent_ids: List[str]) -> List[Tuple[LlmAgent, Optional[AsyncExitStack]]]:
+ # Substitui as variáveis no prompt
+ formatted_prompt = agent.instruction.format(
+ current_datetime=current_datetime,
+ current_day_of_week=current_day_of_week,
+ current_date_iso=current_date_iso,
+ current_time=current_time,
+ )
+
+ return (
+ LlmAgent(
+ name=agent.name,
+ model=LiteLlm(model=agent.model, api_key=agent.api_key),
+ instruction=formatted_prompt,
+ description=agent.description,
+ tools=all_tools,
+ before_model_callback=before_model_callback_func,
+ ),
+ mcp_exit_stack,
+ )
+
+ async def _get_sub_agents(
+ self, sub_agent_ids: List[str]
+ ) -> List[Tuple[LlmAgent, Optional[AsyncExitStack]]]:
"""Obtém e cria os sub-agentes LLM."""
sub_agents = []
for sub_agent_id in sub_agent_ids:
agent = get_agent(self.db, sub_agent_id)
-
+
if agent is None:
raise AgentNotFoundError(f"Agente com ID {sub_agent_id} não encontrado")
-
+
if agent.type != "llm":
- raise ValueError(f"Agente {agent.name} (ID: {agent.id}) não é um agente LLM")
-
+ raise ValueError(
+ f"Agente {agent.name} (ID: {agent.id}) não é um agente LLM"
+ )
+
sub_agent, exit_stack = await self._create_llm_agent(agent)
sub_agents.append((sub_agent, exit_stack))
-
+
return sub_agents
- async def build_llm_agent(self, root_agent) -> Tuple[LlmAgent, Optional[AsyncExitStack]]:
+ async def build_llm_agent(
+ self, root_agent
+ ) -> Tuple[LlmAgent, Optional[AsyncExitStack]]:
"""Constrói um agente LLM com seus sub-agentes."""
logger.info("Criando agente LLM")
-
+
sub_agents = []
if root_agent.config.get("sub_agents"):
- sub_agents_with_stacks = await self._get_sub_agents(root_agent.config.get("sub_agents"))
+ sub_agents_with_stacks = await self._get_sub_agents(
+ root_agent.config.get("sub_agents")
+ )
sub_agents = [agent for agent, _ in sub_agents_with_stacks]
-
+
root_llm_agent, exit_stack = await self._create_llm_agent(root_agent)
if sub_agents:
root_llm_agent.sub_agents = sub_agents
-
+
return root_llm_agent, exit_stack
- async def build_composite_agent(self, root_agent) -> Tuple[SequentialAgent | ParallelAgent | LoopAgent, Optional[AsyncExitStack]]:
+ async def build_composite_agent(
+ self, root_agent
+ ) -> Tuple[SequentialAgent | ParallelAgent | LoopAgent, Optional[AsyncExitStack]]:
"""Constrói um agente composto (Sequential, Parallel ou Loop) com seus sub-agentes."""
logger.info(f"Processando sub-agentes para agente {root_agent.type}")
-
- sub_agents_with_stacks = await self._get_sub_agents(root_agent.config.get("sub_agents", []))
+
+ sub_agents_with_stacks = await self._get_sub_agents(
+ root_agent.config.get("sub_agents", [])
+ )
sub_agents = [agent for agent, _ in sub_agents_with_stacks]
-
+
if root_agent.type == "sequential":
logger.info("Criando SequentialAgent")
- return SequentialAgent(
- name=root_agent.name,
- sub_agents=sub_agents,
- description=root_agent.config.get("description", ""),
- ), None
+ return (
+ SequentialAgent(
+ name=root_agent.name,
+ sub_agents=sub_agents,
+ description=root_agent.config.get("description", ""),
+ ),
+ None,
+ )
elif root_agent.type == "parallel":
logger.info("Criando ParallelAgent")
- return ParallelAgent(
- name=root_agent.name,
- sub_agents=sub_agents,
- description=root_agent.config.get("description", ""),
- ), None
+ return (
+ ParallelAgent(
+ name=root_agent.name,
+ sub_agents=sub_agents,
+ description=root_agent.config.get("description", ""),
+ ),
+ None,
+ )
elif root_agent.type == "loop":
logger.info("Criando LoopAgent")
- return LoopAgent(
- name=root_agent.name,
- sub_agents=sub_agents,
- description=root_agent.config.get("description", ""),
- max_iterations=root_agent.config.get("max_iterations", 5),
- ), None
+ return (
+ LoopAgent(
+ name=root_agent.name,
+ sub_agents=sub_agents,
+ description=root_agent.config.get("description", ""),
+ max_iterations=root_agent.config.get("max_iterations", 5),
+ ),
+ None,
+ )
else:
raise ValueError(f"Tipo de agente inválido: {root_agent.type}")
- async def build_agent(self, root_agent) -> Tuple[LlmAgent | SequentialAgent | ParallelAgent | LoopAgent, Optional[AsyncExitStack]]:
+ async def build_agent(
+ self, root_agent
+ ) -> Tuple[
+ LlmAgent | SequentialAgent | ParallelAgent | LoopAgent, Optional[AsyncExitStack]
+ ]:
"""Constrói o agente apropriado baseado no tipo do agente root."""
if root_agent.type == "llm":
return await self.build_llm_agent(root_agent)
else:
- return await self.build_composite_agent(root_agent)
\ No newline at end of file
+ return await self.build_composite_agent(root_agent)
diff --git a/src/services/agent_runner.py b/src/services/agent_runner.py
index e3e2af8c..2a88e590 100644
--- a/src/services/agent_runner.py
+++ b/src/services/agent_runner.py
@@ -6,6 +6,7 @@ from google.adk.agents import SequentialAgent, ParallelAgent, LoopAgent
from google.adk.runners import Runner
from google.genai.types import Content, Part
from google.adk.sessions import DatabaseSessionService
+from google.adk.memory import InMemoryMemoryService
from google.adk.artifacts.in_memory_artifact_service import InMemoryArtifactService
from src.utils.logger import setup_logger
from src.core.exceptions import AgentNotFoundError, InternalServerError
@@ -23,6 +24,7 @@ async def run_agent(
message: str,
session_service: DatabaseSessionService,
artifacts_service: InMemoryArtifactService,
+ memory_service: InMemoryMemoryService,
db: Session,
):
try:
@@ -40,7 +42,7 @@ async def run_agent(
raise AgentNotFoundError(f"Agente com ID {agent_id} não encontrado")
# Usando o AgentBuilder para criar o agente
- agent_builder = AgentBuilder(db)
+ agent_builder = AgentBuilder(db, memory_service)
root_agent, exit_stack = await agent_builder.build_agent(get_root_agent)
logger.info("Configurando Runner")
diff --git a/src/services/message_service.py b/src/services/message_service.py
deleted file mode 100644
index 4e80e536..00000000
--- a/src/services/message_service.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from sqlalchemy.orm import Session
-from src.models.models import Message
-from src.schemas.schemas import MessageCreate
-from typing import List
-import uuid
-
-def get_message(db: Session, message_id: int) -> Message:
- return db.query(Message).filter(Message.id == message_id).first()
-
-def get_messages_by_session(db: Session, session_id: uuid.UUID, skip: int = 0, limit: int = 100) -> List[Message]:
- return db.query(Message).filter(Message.session_id == session_id).offset(skip).limit(limit).all()
-
-def create_message(db: Session, message: MessageCreate) -> Message:
- db_message = Message(**message.model_dump())
- db.add(db_message)
- db.commit()
- db.refresh(db_message)
- return db_message
-
-def update_message(db: Session, message_id: int, message: MessageCreate) -> Message:
- db_message = db.query(Message).filter(Message.id == message_id).first()
- if db_message:
- for key, value in message.model_dump().items():
- setattr(db_message, key, value)
- db.commit()
- db.refresh(db_message)
- return db_message
-
-def delete_message(db: Session, message_id: int) -> bool:
- db_message = db.query(Message).filter(Message.id == message_id).first()
- if db_message:
- db.delete(db_message)
- db.commit()
- return True
- return False
\ No newline at end of file
diff --git a/src/utils/__pycache__/logger.cpython-310.pyc b/src/utils/__pycache__/logger.cpython-310.pyc
index 2276fbac..d404ff3e 100644
Binary files a/src/utils/__pycache__/logger.cpython-310.pyc and b/src/utils/__pycache__/logger.cpython-310.pyc differ
diff --git a/src/utils/logger.py b/src/utils/logger.py
index f012078d..48e08107 100644
--- a/src/utils/logger.py
+++ b/src/utils/logger.py
@@ -1,6 +1,8 @@
import logging
+import os
import sys
from typing import Optional
+from src.config.settings import settings
class CustomFormatter(logging.Formatter):
"""Formatação personalizada para logs"""
@@ -26,25 +28,33 @@ class CustomFormatter(logging.Formatter):
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
-def setup_logger(name: str, level: Optional[int] = logging.INFO) -> logging.Logger:
+def setup_logger(name: str) -> logging.Logger:
"""
Configura um logger personalizado
Args:
name: Nome do logger
- level: Nível de log (default: INFO)
Returns:
logging.Logger: Logger configurado
"""
logger = logging.getLogger(name)
- logger.setLevel(level)
- # Evitar duplicação de handlers
- if not logger.handlers:
- # Handler para console
- console_handler = logging.StreamHandler(sys.stdout)
- console_handler.setFormatter(CustomFormatter())
- logger.addHandler(console_handler)
+ # Remove handlers existentes para evitar duplicação
+ if logger.handlers:
+ logger.handlers.clear()
+
+ # Configura o nível do logger baseado na variável de ambiente ou configuração
+    log_level = getattr(logging, os.getenv("LOG_LEVEL", settings.LOG_LEVEL).upper(), logging.INFO)
+ logger.setLevel(log_level)
+
+ # Handler para console
+ console_handler = logging.StreamHandler(sys.stdout)
+ console_handler.setFormatter(CustomFormatter())
+ console_handler.setLevel(log_level)
+ logger.addHandler(console_handler)
+
+ # Impede que os logs sejam propagados para o logger root
+ logger.propagate = False
return logger
\ No newline at end of file