fix(api): add error logging for agent card generation and request processing

Davidson Gomes 2025-04-29 19:34:19 -03:00
parent 13a6247780
commit d97ddc06c9
6 changed files with 4 additions and 158 deletions

View File

@@ -206,6 +206,7 @@ async def get_agent_json(
}
return AGENT_CARD
except Exception as e:
logger.error(f"Error generating agent card: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Error generating agent card",
@@ -240,6 +241,7 @@ async def handle_task(
try:
task_request = await request.json()
except Exception as e:
logger.error(f"Error processing request: {str(e)}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request format"
)
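Both hunks above apply the same fix: catch the exception, record the root cause with the module logger, then re-raise a clean HTTPException for the client. A minimal, self-contained sketch of that pattern, assuming FastAPI and the standard logging module (the route paths, the placeholder agent card, and the plain logging.getLogger call are illustrative stand-ins, not taken from this repository):

import logging

from fastapi import FastAPI, HTTPException, Request, status

logger = logging.getLogger(__name__)  # stand-in for the project's setup_logger(__name__)
app = FastAPI()


@app.get("/agent.json")  # illustrative path
async def get_agent_json():
    try:
        agent_card = {"name": "example-agent"}  # placeholder for the real card construction
        return agent_card
    except Exception as e:
        # Log the root cause before hiding it behind a generic 500 response.
        logger.error(f"Error generating agent card: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Error generating agent card",
        )


@app.post("/tasks")  # illustrative path
async def handle_task(request: Request):
    try:
        task_request = await request.json()
    except Exception as e:
        # A malformed body is a client error, so log it and return 400.
        logger.error(f"Error processing request: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request format"
        )
    return {"received": task_request}

The card-generation path answers with 500 because the failure is server-side, while the JSON-parsing path answers with 400 because the client sent an invalid body; in both cases the detail string stays generic and the specifics go only to the log.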

View File

@@ -32,11 +32,6 @@ class Settings(BaseSettings):
LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
LOG_DIR: str = "logs"
# Knowledge API settings
KNOWLEDGE_API_URL: str = os.getenv("KNOWLEDGE_API_URL", "http://localhost:5540")
KNOWLEDGE_API_KEY: str = os.getenv("KNOWLEDGE_API_KEY", "")
TENANT_ID: str = os.getenv("TENANT_ID", "")
# Redis settings
REDIS_HOST: str = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT: int = int(os.getenv("REDIS_PORT", 6379))

View File

@@ -1,4 +1,4 @@
from pydantic import BaseModel, Field, validator, EmailStr
from pydantic import BaseModel, Field, validator
from typing import Optional, Dict, Any, Union, List
from datetime import datetime
from uuid import UUID

View File

@@ -9,157 +9,13 @@ from src.services.custom_tools import CustomToolBuilder
from src.services.mcp_service import MCPService
from sqlalchemy.orm import Session
from contextlib import AsyncExitStack
from google.adk.agents.callback_context import CallbackContext
from google.adk.models import LlmResponse, LlmRequest
from google.adk.tools import load_memory
import requests
import os
from datetime import datetime
logger = setup_logger(__name__)
def before_model_callback(
callback_context: CallbackContext, llm_request: LlmRequest
) -> Optional[LlmResponse]:
"""
Callback executed before the model generates a response.
Always executes a search in the knowledge base before proceeding.
"""
try:
agent_name = callback_context.agent_name
logger.debug(f"🔄 Before model call for agent: {agent_name}")
# Extract the last user message
last_user_message = ""
if llm_request.contents and llm_request.contents[-1].role == "user":
if llm_request.contents[-1].parts:
last_user_message = llm_request.contents[-1].parts[0].text
logger.debug(f"📝 Última mensagem do usuário: {last_user_message}")
# Extract and format the history of messages
history = []
for content in llm_request.contents:
if content.parts and content.parts[0].text:
# Replace 'model' with 'assistant' in the role
role = "assistant" if content.role == "model" else content.role
history.append(
{
"role": role,
"content": {
"type": "text",
"text": content.parts[0].text,
},
}
)
# log the history of messages
logger.debug(f"📝 History of messages: {history}")
if last_user_message:
logger.info("🔍 Executing knowledge base search")
# Execute the knowledge base search synchronously
search_results = search_knowledge_base_function_sync(
last_user_message, history
)
if search_results:
logger.info("✅ Resultados encontrados, adicionando ao contexto")
# Get the original system instruction
original_instruction = llm_request.config.system_instruction or ""
# Add the search results and history to the system context
modified_text = (
original_instruction
+ "\n\n<knowledge_context>\n"
+ str(search_results)
+ "\n</knowledge_context>\n\n<history>\n"
+ str(history)
+ "\n</history>"
)
llm_request.config.system_instruction = modified_text
logger.debug(
"📝 System instruction updated with search results and history"
)
else:
logger.warning("⚠️ No results found in the search")
else:
logger.warning("⚠️ No user message found")
logger.info("✅ before_model_callback finished")
return None
except Exception as e:
logger.error(f"❌ Error in before_model_callback: {str(e)}", exc_info=True)
return None
def search_knowledge_base_function_sync(query: str, history=[]):
"""
Search knowledge base synchronously.
Args:
query (str): The search query, with user message and history messages, all in one string
Returns:
dict: The search results
"""
try:
logger.info("🔍 Starting knowledge base search")
logger.debug(f"Received query: {query}")
# url = os.getenv("KNOWLEDGE_API_URL") + "/api/v1/search"
url = os.getenv("KNOWLEDGE_API_URL") + "/api/v1/knowledge"
tenant_id = os.getenv("TENANT_ID")
url = url + "?tenant_id=" + tenant_id
logger.debug(f"API URL: {url}")
logger.debug(f"Tenant ID: {tenant_id}")
headers = {
"x-api-key": f"{os.getenv('KNOWLEDGE_API_KEY')}",
"Content-Type": "application/json",
}
logger.debug(f"Headers configured: {headers}")
payload = {
"gemini_api_key": os.getenv("GOOGLE_API_KEY"),
"gemini_model": "gemini-2.0-flash-lite-001",
"gemini_temperature": 0.7,
"query": query,
"tenant_id": tenant_id,
"history": history,
}
logger.debug(f"Request payload: {payload}")
# Using requests to make a synchronous request with timeout
logger.info("🔄 Making synchronous request to the knowledge API")
# response = requests.post(url, headers=headers, json=payload)
response = requests.get(url, headers=headers, timeout=10)
if response.status_code == 200:
logger.info("✅ Search executed successfully")
result = response.json()
logger.debug(f"Search result: {result}")
return result
else:
logger.error(
f"❌ Error performing search. Status code: {response.status_code}"
)
return None
except requests.exceptions.Timeout:
logger.error("❌ Timeout performing search")
return None
except requests.exceptions.RequestException as e:
logger.error(f"❌ Error in request: {str(e)}", exc_info=True)
return None
except Exception as e:
logger.error(f"❌ Error performing search: {str(e)}", exc_info=True)
return None
class AgentBuilder:
def __init__(self, db: Session):
self.db = db
@@ -204,7 +60,6 @@ class AgentBuilder:
# before_model_callback_func = None
if agent.config.get("load_memory"):
all_tools.append(load_memory)
# before_model_callback_func = before_model_callback
formatted_prompt = (
formatted_prompt
+ "\n\n<memory_instructions>ALWAYS use the load_memory tool to retrieve knowledge for your context</memory_instructions>\n\n"
@@ -217,7 +72,6 @@ class AgentBuilder:
instruction=formatted_prompt,
description=agent.description,
tools=all_tools,
# before_model_callback=before_model_callback_func,
),
mcp_exit_stack,
)
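With the knowledge-base callback removed, memory retrieval in the agent builder now relies solely on the load_memory tool plus the <memory_instructions> block appended to the prompt, as the remaining lines above show. A rough sketch of that surviving path, assuming google.adk's LlmAgent class (the build_agent helper, its signature, and the model id are illustrative, not taken from this file):

from google.adk.agents import LlmAgent
from google.adk.tools import load_memory


def build_agent(
    name: str, description: str, prompt: str, tools: list, use_memory: bool
) -> LlmAgent:
    all_tools = list(tools)
    formatted_prompt = prompt
    if use_memory:
        # Instead of injecting search results through before_model_callback,
        # expose the load_memory tool and instruct the model to call it.
        all_tools.append(load_memory)
        formatted_prompt += (
            "\n\n<memory_instructions>ALWAYS use the load_memory tool "
            "to retrieve knowledge for your context</memory_instructions>\n\n"
        )
    return LlmAgent(
        name=name,
        model="gemini-2.0-flash",  # illustrative model id
        instruction=formatted_prompt,
        description=description,
        tools=all_tools,
    )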

View File

@@ -1,4 +1,3 @@
import os
from sqlalchemy.orm import Session
from sqlalchemy.exc import SQLAlchemyError
from fastapi import HTTPException, status
@@ -51,8 +50,6 @@ def get_agents_by_client(
agents = query.offset(skip).limit(limit).all()
# The virtual property agent_card_url will be included automatically
# when the agents are serialized to JSON
return agents
except SQLAlchemyError as e:
logger.error(f"Error searching for client agents {client_id}: {str(e)}")
@@ -146,8 +143,6 @@ def create_agent(db: Session, agent: AgentCreate) -> Agent:
db.refresh(db_agent)
logger.info(f"Agent created successfully: {db_agent.id}")
# The virtual property agent_card_url will be included automatically
# when the agent is serialized to JSON
return db_agent
except SQLAlchemyError as e:
db.rollback()

View File

@@ -2,7 +2,7 @@ from sqlalchemy.orm import Session
from sqlalchemy.exc import SQLAlchemyError
from fastapi import HTTPException, status
from src.models.models import MCPServer
from src.schemas.schemas import MCPServerCreate, ToolConfig
from src.schemas.schemas import MCPServerCreate
from typing import List, Optional
import uuid
import logging