From 257d50a58475c5b4e5673b5ba76ed14272f62fcb Mon Sep 17 00:00:00 2001 From: Victor Calazans Date: Fri, 16 May 2025 22:25:39 -0300 Subject: [PATCH 01/14] =?UTF-8?q?=E2=9C=A8=20feat:=20Create=20delay=20node?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/services/custom_agents/workflow_agent.py | 75 ++++++++++++++++++-- 1 file changed, 70 insertions(+), 5 deletions(-) diff --git a/src/services/custom_agents/workflow_agent.py b/src/services/custom_agents/workflow_agent.py index 97c99ba4..26d7e059 100644 --- a/src/services/custom_agents/workflow_agent.py +++ b/src/services/custom_agents/workflow_agent.py @@ -320,10 +320,9 @@ class WorkflowAgent(BaseAgent): ) ] ), - ) - ] + ) ] content = content + condition_content - + yield { "content": content, "status": "condition_evaluated", @@ -332,7 +331,7 @@ class WorkflowAgent(BaseAgent): "conversation_history": conversation_history, "session_id": session_id, } - + async def message_node_function( state: State, node_id: str, node_data: Dict[str, Any] ) -> AsyncGenerator[State, None]: @@ -365,15 +364,81 @@ class WorkflowAgent(BaseAgent): "status": "message_added", "node_outputs": node_outputs, "cycle_count": state.get("cycle_count", 0), + "conversation_history": conversation_history, "session_id": session_id, + } + + async def delay_node_function( + state: State, node_id: str, node_data: Dict[str, Any] + ) -> AsyncGenerator[State, None]: + delay_data = node_data.get("delay", {}) + delay_value = delay_data.get("value", 0) + delay_unit = delay_data.get("unit", "seconds") + delay_description = delay_data.get("description", "") + + # Convert to seconds based on unit + delay_seconds = delay_value + if delay_unit == "minutes": + delay_seconds = delay_value * 60 + elif delay_unit == "hours": + delay_seconds = delay_value * 3600 + + label = node_data.get("label", "delay_node") + print(f"\n⏱️ DELAY-NODE: {delay_value} {delay_unit} - {delay_description}") + + content = 
state.get("content", []) + session_id = state.get("session_id", "") + conversation_history = state.get("conversation_history", []) + + # Add a message indicating the delay + delay_message = f"Aguardando {delay_value} {delay_unit}..." + if delay_description: + delay_message += f" ({delay_description})" + + new_event = Event( + author=label, + content=Content(parts=[Part(text=delay_message)]), + ) + content = content + [new_event] + + # Store node output information + node_outputs = state.get("node_outputs", {}) + node_outputs[node_id] = { + "delay_value": delay_value, + "delay_unit": delay_unit, + "delay_seconds": delay_seconds, + "delay_start_time": datetime.now().isoformat(), + } + + # Actually perform the delay + import asyncio + await asyncio.sleep(delay_seconds) + + # Add completion message + complete_message = f"Delay de {delay_value} {delay_unit} concluído." + complete_event = Event( + author=label, + content=Content(parts=[Part(text=complete_message)]), + ) + content = content + [complete_event] + + # Update node outputs with completion information + node_outputs[node_id]["delay_end_time"] = datetime.now().isoformat() + node_outputs[node_id]["delay_completed"] = True + + yield { + "content": content, + "status": "delay_completed", + "node_outputs": node_outputs, "cycle_count": state.get("cycle_count", 0), "conversation_history": conversation_history, "session_id": session_id, } - + return { "start-node": start_node_function, "agent-node": agent_node_function, "condition-node": condition_node_function, "message-node": message_node_function, + "delay-node": delay_node_function, } def _evaluate_condition(self, condition: Dict[str, Any], state: State) -> bool: From f000a0870189e165975bfa858245387435239bd5 Mon Sep 17 00:00:00 2001 From: Arley Daniel Peter Date: Fri, 16 May 2025 22:48:35 -0300 Subject: [PATCH 02/14] Update docker-compose.yml image to correct image Updating image to correct image https://hub.docker.com/r/evoapicloud/evo-ai/tags on latest --- 
docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 07334f1b..8dd9a27b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.8" services: api: - image: evo-ai-api:latest + image: evoapicloud/evo-ai:latest depends_on: - postgres - redis From 86258efcbdb802564f6ff2722efe8c33aacf735a Mon Sep 17 00:00:00 2001 From: Victor Calazans Date: Sat, 17 May 2025 09:17:43 -0300 Subject: [PATCH 03/14] Remove messages Remove messages --- src/services/custom_agents/workflow_agent.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/src/services/custom_agents/workflow_agent.py b/src/services/custom_agents/workflow_agent.py index 26d7e059..66f310e0 100644 --- a/src/services/custom_agents/workflow_agent.py +++ b/src/services/custom_agents/workflow_agent.py @@ -389,17 +389,6 @@ class WorkflowAgent(BaseAgent): session_id = state.get("session_id", "") conversation_history = state.get("conversation_history", []) - # Add a message indicating the delay - delay_message = f"Aguardando {delay_value} {delay_unit}..." - if delay_description: - delay_message += f" ({delay_description})" - - new_event = Event( - author=label, - content=Content(parts=[Part(text=delay_message)]), - ) - content = content + [new_event] - # Store node output information node_outputs = state.get("node_outputs", {}) node_outputs[node_id] = { @@ -413,13 +402,6 @@ class WorkflowAgent(BaseAgent): import asyncio await asyncio.sleep(delay_seconds) - # Add completion message - complete_message = f"Delay de {delay_value} {delay_unit} concluído." 
- complete_event = Event( - author=label, - content=Content(parts=[Part(text=complete_message)]), - ) - content = content + [complete_event] # Update node outputs with completion information node_outputs[node_id]["delay_end_time"] = datetime.now().isoformat() From d01644c00c89d554b7cb4625f85226127f5b1721 Mon Sep 17 00:00:00 2001 From: Victor Calazans Date: Sat, 17 May 2025 09:37:56 -0300 Subject: [PATCH 04/14] Change doc Change doc --- src/services/custom_agents/workflow_agent.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/services/custom_agents/workflow_agent.py b/src/services/custom_agents/workflow_agent.py index 66f310e0..bf7e7bc5 100644 --- a/src/services/custom_agents/workflow_agent.py +++ b/src/services/custom_agents/workflow_agent.py @@ -6,6 +6,9 @@ │ Creation date: May 13, 2025 │ │ Contact: contato@evolution-api.com │ ├──────────────────────────────────────────────────────────────────────────────┤ +│ @contributors: │ +│ Victor Calazans - delay node implementation (May 17, 2025) │ +├──────────────────────────────────────────────────────────────────────────────┤ │ @copyright © Evolution API 2025. All rights reserved. 
│ │ Licensed under the Apache License, Version 2.0 │ │ │ From c469bf1998ad54110d5bb540dec7a59df5ad8d8c Mon Sep 17 00:00:00 2001 From: Arley Daniel Peter Date: Sat, 17 May 2025 16:32:31 -0300 Subject: [PATCH 05/14] feat: use run_in_threadpool to fetch tools --- src/api/mcp_server_routes.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/api/mcp_server_routes.py b/src/api/mcp_server_routes.py index 143ae8ce..4f3a7447 100644 --- a/src/api/mcp_server_routes.py +++ b/src/api/mcp_server_routes.py @@ -28,6 +28,7 @@ """ from fastapi import APIRouter, Depends, HTTPException, status +from starlette.concurrency import run_in_threadpool from sqlalchemy.orm import Session from src.config.database import get_db from typing import List @@ -54,7 +55,7 @@ router = APIRouter( responses={404: {"description": "Not found"}}, ) - +# Last edited by Arley Peter on 2025-05-17 @router.post("/", response_model=MCPServer, status_code=status.HTTP_201_CREATED) async def create_mcp_server( server: MCPServerCreate, @@ -64,7 +65,7 @@ async def create_mcp_server( # Only administrators can create MCP servers await verify_admin(payload) - return mcp_server_service.create_mcp_server(db, server) + return await run_in_threadpool(mcp_server_service.create_mcp_server, db, server) @router.get("/", response_model=List[MCPServer]) From 2c7e5d05289127020e1ab50b0e259f434e4476e1 Mon Sep 17 00:00:00 2001 From: Arley Daniel Peter Date: Sat, 17 May 2025 16:33:17 -0300 Subject: [PATCH 06/14] feat: update schemas to make tools optional since they are automatically fetched, no need to make them mandatory --- src/schemas/schemas.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/schemas/schemas.py b/src/schemas/schemas.py index fcef62d2..5a3945c7 100644 --- a/src/schemas/schemas.py +++ b/src/schemas/schemas.py @@ -262,14 +262,14 @@ class ToolConfig(BaseModel): inputModes: List[str] = Field(default_factory=list) outputModes: List[str] = Field(default_factory=list) - 
+# Last edited by Arley Peter on 2025-05-17 class MCPServerBase(BaseModel): name: str description: Optional[str] = None config_type: str = Field(default="studio") config_json: Dict[str, Any] = Field(default_factory=dict) environments: Dict[str, Any] = Field(default_factory=dict) - tools: List[ToolConfig] = Field(default_factory=list) + tools: Optional[List[ToolConfig]] = Field(default_factory=list) type: str = Field(default="official") From b619d88d4e75902670694e1aacace24c8c5bbb39 Mon Sep 17 00:00:00 2001 From: Arley Daniel Peter Date: Sat, 17 May 2025 16:34:01 -0300 Subject: [PATCH 07/14] feat: if tools are empty, auto-fetch and save --- src/services/mcp_server_service.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/services/mcp_server_service.py b/src/services/mcp_server_service.py index 52c1703d..8fea426b 100644 --- a/src/services/mcp_server_service.py +++ b/src/services/mcp_server_service.py @@ -32,6 +32,7 @@ from sqlalchemy.exc import SQLAlchemyError from fastapi import HTTPException, status from src.models.models import MCPServer from src.schemas.schemas import MCPServerCreate +from src.utils.mcp_discovery import discover_mcp_tools from typing import List, Optional import uuid import logging @@ -72,8 +73,16 @@ def create_mcp_server(db: Session, server: MCPServerCreate) -> MCPServer: try: # Convert tools to JSON serializable format server_data = server.model_dump() - server_data["tools"] = [tool.model_dump() for tool in server.tools] + # Last edited by Arley Peter on 2025-05-17 + supplied_tools = server_data.pop("tools", []) + if not supplied_tools: + discovered = discover_mcp_tools(server_data["config_json"]) + print(f"🔍 Found {len(discovered)} tools.") + server_data["tools"] = discovered + + else: + server_data["tools"] = [tool.model_dump() for tool in supplied_tools] db_server = MCPServer(**server_data) db.add(db_server) db.commit() From 7a9d3e147708318405baf72a0b0112bfcb8e238b Mon Sep 17 00:00:00 2001 From: Arley Daniel 
Peter Date: Sat, 17 May 2025 16:35:34 -0300 Subject: [PATCH 08/14] feat: Add MCP tools discovery functionality - Implement async MCP server tool discovery - Add sync wrapper for tool discovery - Include tool metadata serialization - Add proper file documentation and licensing --- src/utils/mcp_discovery.py | 55 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 src/utils/mcp_discovery.py diff --git a/src/utils/mcp_discovery.py b/src/utils/mcp_discovery.py new file mode 100644 index 00000000..b45bbb78 --- /dev/null +++ b/src/utils/mcp_discovery.py @@ -0,0 +1,55 @@ +""" +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Arley Peter │ +│ @file: mcp_discovery.py │ +│ Developed by: Arley Peter │ +│ Creation date: May 05, 2025 │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +""" + +from typing import List, Dict, Any +import asyncio + +async def _discover_async(config_json: Dict[str, Any]) -> List[Dict[str, Any]]: + """Return a list[dict] with the tool metadata advertised by the MCP server.""" + + from src.services.mcp_service import MCPService + + service = MCPService() + tools, exit_stack = await service._connect_to_mcp_server(config_json) + serialised = [t.to_dict() if hasattr(t, "to_dict") else { + "id": t.name, + "name": t.name, + "description": getattr(t, "description", t.name), + "tags": getattr(t, "tags", []), + "examples": getattr(t, "examples", []), + "inputModes": getattr(t, "input_modes", ["text"]), + "outputModes": getattr(t, "output_modes", ["text"]), + } for t in tools] + if exit_stack: + await exit_stack.aclose() + return serialised + + +def discover_mcp_tools(config_json: Dict[str, Any]) -> List[Dict[str, Any]]: + """Sync wrapper so we can call it from a sync service function.""" + return asyncio.run(_discover_async(config_json)) From 9135aa59d69216d3525093229536f38d2617c1f7 Mon Sep 17 00:00:00 2001 From: Guilherme Gomes Date: Mon, 19 May 2025 01:21:30 -0300 Subject: [PATCH 09/14] feat(custom_tools): URL encode path parameters and improve response handling --- src/services/custom_tools.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/services/custom_tools.py b/src/services/custom_tools.py index f8ac96b8..0adde66c 100644 --- a/src/services/custom_tools.py +++ b/src/services/custom_tools.py @@ -31,6 +31,7 @@ from typing import Any, Dict, List from google.adk.tools import FunctionTool import requests import json +import urllib.parse from src.utils.logger import setup_logger logger = setup_logger(__name__) @@ -70,7 +71,9 @@ class CustomToolBuilder: url = endpoint for param, value in path_params.items(): if param in all_values: - url = url.replace(f"{{{param}}}", str(all_values[param])) + # URL encode 
the value for URL safe characters + replacement_value = urllib.parse.quote(str(all_values[param]), safe='') + url = url.replace(f"{{{param}}}", replacement_value) # Process query parameters query_params_dict = {} @@ -119,8 +122,12 @@ class CustomToolBuilder: f"Error in the request: {response.status_code} - {response.text}" ) - # Always returns the response as a string - return json.dumps(response.json()) + # Try to parse the response as JSON, if it fails, return the text content + try: + return json.dumps(response.json()) + except ValueError: + # Response is not JSON, return the text content + return json.dumps({"content": response.text}) except Exception as e: logger.error(f"Error executing tool {name}: {str(e)}") From cf24a7ce5db010f21a3afeb28b2e27bb59c83d0e Mon Sep 17 00:00:00 2001 From: Davidson Gomes Date: Mon, 19 May 2025 15:22:37 -0300 Subject: [PATCH 10/14] feat(api): integrate new AI engines and update chat routes for dynamic agent handling --- pyproject.toml | 3 + src/api/chat_routes.py | 38 +- src/config/__init__.py | 3 + src/config/settings.py | 9 +- src/schemas/chat.py | 6 +- src/services/__init__.py | 2 +- src/services/a2a_task_manager.py | 13 +- .../{custom_agents => adk}/__init__.py | 0 src/services/{ => adk}/agent_builder.py | 10 +- src/services/{ => adk}/agent_runner.py | 2 +- src/services/adk/custom_agents/__init__.py | 0 .../{ => adk}/custom_agents/a2a_agent.py | 0 .../{ => adk}/custom_agents/task_agent.py | 2 +- .../{ => adk}/custom_agents/workflow_agent.py | 2 +- src/services/{ => adk}/custom_tools.py | 0 src/services/{ => adk}/mcp_service.py | 0 src/services/crewai/agent_builder.py | 219 ++++++ src/services/crewai/agent_runner.py | 595 ++++++++++++++++ src/services/crewai/custom_tool.py | 369 ++++++++++ src/services/crewai/mcp_service.py | 264 ++++++++ src/services/crewai/session_service.py | 637 ++++++++++++++++++ src/services/service_providers.py | 16 +- 22 files changed, 2153 insertions(+), 37 deletions(-) rename 
src/services/{custom_agents => adk}/__init__.py (100%) rename src/services/{ => adk}/agent_builder.py (98%) rename src/services/{ => adk}/agent_runner.py (99%) create mode 100644 src/services/adk/custom_agents/__init__.py rename src/services/{ => adk}/custom_agents/a2a_agent.py (100%) rename src/services/{ => adk}/custom_agents/task_agent.py (99%) rename src/services/{ => adk}/custom_agents/workflow_agent.py (99%) rename src/services/{ => adk}/custom_tools.py (100%) rename src/services/{ => adk}/mcp_service.py (100%) create mode 100644 src/services/crewai/agent_builder.py create mode 100644 src/services/crewai/agent_runner.py create mode 100644 src/services/crewai/custom_tool.py create mode 100644 src/services/crewai/mcp_service.py create mode 100644 src/services/crewai/session_service.py diff --git a/pyproject.toml b/pyproject.toml index ada85ccd..400f3969 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,9 @@ dependencies = [ "langgraph==0.4.1", "opentelemetry-sdk==1.33.0", "opentelemetry-exporter-otlp==1.33.0", + "mcp==1.9.0", + "crewai==0.120.1", + "crewai-tools==0.45.0", ] [project.optional-dependencies] diff --git a/src/api/chat_routes.py b/src/api/chat_routes.py index bf7cd0a2..e34ab755 100644 --- a/src/api/chat_routes.py +++ b/src/api/chat_routes.py @@ -39,6 +39,7 @@ from fastapi import ( Header, ) from sqlalchemy.orm import Session +from src.config.settings import settings from src.config.database import get_db from src.core.jwt_middleware import ( get_jwt_token, @@ -49,7 +50,8 @@ from src.services import ( agent_service, ) from src.schemas.chat import ChatRequest, ChatResponse, ErrorResponse, FileData -from src.services.agent_runner import run_agent, run_agent_stream +from src.services.adk.agent_runner import run_agent as run_agent_adk, run_agent_stream +from src.services.crewai.agent_runner import run_agent as run_agent_crewai from src.core.exceptions import AgentNotFoundError from src.services.service_providers import ( session_service, @@ 
-262,7 +264,7 @@ async def websocket_chat( @router.post( - "", + "/{agent_id}/{external_id}", response_model=ChatResponse, responses={ 400: {"model": ErrorResponse}, @@ -272,20 +274,32 @@ async def websocket_chat( ) async def chat( request: ChatRequest, + agent_id: str, + external_id: str, _=Depends(get_agent_by_api_key), db: Session = Depends(get_db), ): try: - final_response = await run_agent( - request.agent_id, - request.external_id, - request.message, - session_service, - artifacts_service, - memory_service, - db, - files=request.files, - ) + if settings.AI_ENGINE == "adk": + final_response = await run_agent_adk( + agent_id, + external_id, + request.message, + session_service, + artifacts_service, + memory_service, + db, + files=request.files, + ) + elif settings.AI_ENGINE == "crewai": + final_response = await run_agent_crewai( + agent_id, + external_id, + request.message, + session_service, + db, + files=request.files, + ) return { "response": final_response["final_response"], diff --git a/src/config/__init__.py b/src/config/__init__.py index e69de29b..957fe205 100644 --- a/src/config/__init__.py +++ b/src/config/__init__.py @@ -0,0 +1,3 @@ +from src.config.settings import settings + +__all__ = ["settings"] diff --git a/src/config/settings.py b/src/config/settings.py index 1dbb8440..5165b1f2 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -57,6 +57,9 @@ class Settings(BaseSettings): "POSTGRES_CONNECTION_STRING", "postgresql://postgres:root@localhost:5432/evo_ai" ) + # AI engine settings + AI_ENGINE: str = os.getenv("AI_ENGINE", "adk") + # Logging settings LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO") LOG_DIR: str = "logs" @@ -83,11 +86,11 @@ class Settings(BaseSettings): # Email provider settings EMAIL_PROVIDER: str = os.getenv("EMAIL_PROVIDER", "sendgrid") - + # SendGrid settings SENDGRID_API_KEY: str = os.getenv("SENDGRID_API_KEY", "") EMAIL_FROM: str = os.getenv("EMAIL_FROM", "noreply@yourdomain.com") - + # SMTP settings SMTP_HOST: 
str = os.getenv("SMTP_HOST", "") SMTP_PORT: int = int(os.getenv("SMTP_PORT", 587)) @@ -96,7 +99,7 @@ class Settings(BaseSettings): SMTP_USE_TLS: bool = os.getenv("SMTP_USE_TLS", "true").lower() == "true" SMTP_USE_SSL: bool = os.getenv("SMTP_USE_SSL", "false").lower() == "true" SMTP_FROM: str = os.getenv("SMTP_FROM", "") - + APP_URL: str = os.getenv("APP_URL", "http://localhost:8000") # Server settings diff --git a/src/schemas/chat.py b/src/schemas/chat.py index 6188ca7a..184427ad 100644 --- a/src/schemas/chat.py +++ b/src/schemas/chat.py @@ -43,9 +43,11 @@ class FileData(BaseModel): class ChatRequest(BaseModel): """Model to represent a chat request.""" - agent_id: str = Field(..., description="Agent ID to process the message") - external_id: str = Field(..., description="External ID for user identification") message: str = Field(..., description="User message to the agent") + agent_id: Optional[str] = Field(None, description="Agent ID to process the message") + external_id: Optional[str] = Field( + None, description="External ID for user identification" + ) files: Optional[List[FileData]] = Field( None, description="List of files attached to the message" ) diff --git a/src/services/__init__.py b/src/services/__init__.py index 255943f4..3cebda35 100644 --- a/src/services/__init__.py +++ b/src/services/__init__.py @@ -1 +1 @@ -from .agent_runner import run_agent +from .adk.agent_runner import run_agent diff --git a/src/services/a2a_task_manager.py b/src/services/a2a_task_manager.py index c37c898e..62c8e427 100644 --- a/src/services/a2a_task_manager.py +++ b/src/services/a2a_task_manager.py @@ -45,7 +45,7 @@ from src.services.agent_service import ( ) from src.services.mcp_server_service import get_mcp_server -from src.services.agent_runner import run_agent, run_agent_stream +from src.services.adk.agent_runner import run_agent, run_agent_stream from src.services.service_providers import ( session_service, artifacts_service, @@ -388,7 +388,6 @@ class A2ATaskManager: 
self, request: SendTaskStreamingRequest, agent: Agent ) -> AsyncIterable[SendTaskStreamingResponse]: """Processes a task in streaming mode using the specified agent.""" - # Extrair e processar arquivos da mesma forma que no método _process_task query = self._extract_user_query(request.params) try: @@ -448,21 +447,19 @@ class A2ATaskManager: ), ) - # Use os arquivos processados do _extract_user_query files = getattr(self, "_last_processed_files", None) - # Log sobre os arquivos processados if files: logger.info( - f"Streaming: Passando {len(files)} arquivos processados para run_agent_stream" + f"Streaming: Uploading {len(files)} files to run_agent_stream" ) for file_info in files: logger.info( - f"Streaming: Arquivo sendo enviado: {file_info.filename} ({file_info.content_type})" + f"Streaming: File being sent: {file_info.filename} ({file_info.content_type})" ) else: logger.warning( - "Streaming: Nenhum arquivo processado disponível para enviar ao agente" + "Streaming: No processed files available to send to the agent" ) async for chunk in run_agent_stream( @@ -473,7 +470,7 @@ class A2ATaskManager: artifacts_service=artifacts_service, memory_service=memory_service, db=self.db, - files=files, # Passar os arquivos processados para o streaming + files=files, ): try: chunk_data = json.loads(chunk) diff --git a/src/services/custom_agents/__init__.py b/src/services/adk/__init__.py similarity index 100% rename from src/services/custom_agents/__init__.py rename to src/services/adk/__init__.py diff --git a/src/services/agent_builder.py b/src/services/adk/agent_builder.py similarity index 98% rename from src/services/agent_builder.py rename to src/services/adk/agent_builder.py index 00b1b368..e99eaba0 100644 --- a/src/services/agent_builder.py +++ b/src/services/adk/agent_builder.py @@ -36,11 +36,11 @@ from src.schemas.schemas import Agent from src.utils.logger import setup_logger from src.core.exceptions import AgentNotFoundError from src.services.agent_service import 
get_agent -from src.services.custom_tools import CustomToolBuilder -from src.services.mcp_service import MCPService -from src.services.custom_agents.a2a_agent import A2ACustomAgent -from src.services.custom_agents.workflow_agent import WorkflowAgent -from src.services.custom_agents.task_agent import TaskAgent +from src.services.adk.custom_tools import CustomToolBuilder +from src.services.adk.mcp_service import MCPService +from src.services.adk.custom_agents.a2a_agent import A2ACustomAgent +from src.services.adk.custom_agents.workflow_agent import WorkflowAgent +from src.services.adk.custom_agents.task_agent import TaskAgent from src.services.apikey_service import get_decrypted_api_key from sqlalchemy.orm import Session from contextlib import AsyncExitStack diff --git a/src/services/agent_runner.py b/src/services/adk/agent_runner.py similarity index 99% rename from src/services/agent_runner.py rename to src/services/adk/agent_runner.py index 4e205875..ef727f5f 100644 --- a/src/services/agent_runner.py +++ b/src/services/adk/agent_runner.py @@ -35,7 +35,7 @@ from google.adk.artifacts.in_memory_artifact_service import InMemoryArtifactServ from src.utils.logger import setup_logger from src.core.exceptions import AgentNotFoundError, InternalServerError from src.services.agent_service import get_agent -from src.services.agent_builder import AgentBuilder +from src.services.adk.agent_builder import AgentBuilder from sqlalchemy.orm import Session from typing import Optional, AsyncGenerator import asyncio diff --git a/src/services/adk/custom_agents/__init__.py b/src/services/adk/custom_agents/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/services/custom_agents/a2a_agent.py b/src/services/adk/custom_agents/a2a_agent.py similarity index 100% rename from src/services/custom_agents/a2a_agent.py rename to src/services/adk/custom_agents/a2a_agent.py diff --git a/src/services/custom_agents/task_agent.py b/src/services/adk/custom_agents/task_agent.py 
similarity index 99% rename from src/services/custom_agents/task_agent.py rename to src/services/adk/custom_agents/task_agent.py index b2247b4b..a1329dd2 100644 --- a/src/services/custom_agents/task_agent.py +++ b/src/services/adk/custom_agents/task_agent.py @@ -162,7 +162,7 @@ class TaskAgent(BaseAgent): ), ) - from src.services.agent_builder import AgentBuilder + from src.services.adk.agent_builder import AgentBuilder print(f"Building agent in Task agent: {agent.name}") agent_builder = AgentBuilder(self.db) diff --git a/src/services/custom_agents/workflow_agent.py b/src/services/adk/custom_agents/workflow_agent.py similarity index 99% rename from src/services/custom_agents/workflow_agent.py rename to src/services/adk/custom_agents/workflow_agent.py index 97c99ba4..04256eac 100644 --- a/src/services/custom_agents/workflow_agent.py +++ b/src/services/adk/custom_agents/workflow_agent.py @@ -181,7 +181,7 @@ class WorkflowAgent(BaseAgent): return # Import moved to inside the function to avoid circular import - from src.services.agent_builder import AgentBuilder + from src.services.adk.agent_builder import AgentBuilder agent_builder = AgentBuilder(self.db) root_agent, exit_stack = await agent_builder.build_agent(agent) diff --git a/src/services/custom_tools.py b/src/services/adk/custom_tools.py similarity index 100% rename from src/services/custom_tools.py rename to src/services/adk/custom_tools.py diff --git a/src/services/mcp_service.py b/src/services/adk/mcp_service.py similarity index 100% rename from src/services/mcp_service.py rename to src/services/adk/mcp_service.py diff --git a/src/services/crewai/agent_builder.py b/src/services/crewai/agent_builder.py new file mode 100644 index 00000000..d9a3b685 --- /dev/null +++ b/src/services/crewai/agent_builder.py @@ -0,0 +1,219 @@ +""" +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: agent_builder.py │ +│ Developed by: Davidson Gomes │ +│ Creation 
date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +""" + +from typing import List, Tuple, Optional +from src.schemas.schemas import Agent +from src.schemas.agent_config import AgentTask +from src.services.crewai.custom_tool import CustomToolBuilder +from src.services.crewai.mcp_service import MCPService +from src.utils.logger import setup_logger +from src.services.apikey_service import get_decrypted_api_key +from sqlalchemy.orm import Session +from contextlib import AsyncExitStack +from crewai import LLM, Agent as LlmAgent, Crew, Task, Process + +from datetime import datetime +import uuid + +logger = setup_logger(__name__) + + +class AgentBuilder: + def __init__(self, db: Session): + self.db = db + self.custom_tool_builder = CustomToolBuilder() + self.mcp_service = MCPService() + + async def _get_api_key(self, agent: Agent) -> str: + """Get the API key for the agent.""" + api_key = None + + # Get API key from api_key_id + if hasattr(agent, "api_key_id") and agent.api_key_id: + if decrypted_key := get_decrypted_api_key(self.db, agent.api_key_id): + logger.info(f"Using stored API key for agent {agent.name}") + api_key = decrypted_key + else: + logger.error(f"Stored API key not found for agent {agent.name}") + raise ValueError( + f"API key with ID {agent.api_key_id} not found or inactive" + ) + else: + # Check if there is an API key in the config (temporary field) + config_api_key = agent.config.get("api_key") if agent.config else None + if config_api_key: + logger.info(f"Using config API key for agent {agent.name}") + # Check if it is a UUID of a stored key + try: + key_id = uuid.UUID(config_api_key) + if decrypted_key := get_decrypted_api_key(self.db, key_id): + logger.info("Config API key is a valid reference") + api_key = decrypted_key + else: + # Use the key directly + api_key = config_api_key + except (ValueError, TypeError): + # It is not a UUID, use directly + api_key = config_api_key + else: + logger.error(f"No API key 
configured for agent {agent.name}") + raise ValueError( + f"Agent {agent.name} does not have a configured API key" + ) + + return api_key + + async def _create_llm(self, agent: Agent) -> LLM: + """Create an LLM from the agent data.""" + api_key = await self._get_api_key(agent) + + return LLM(model=agent.model, api_key=api_key) + + async def _create_llm_agent( + self, agent: Agent, enabled_tools: List[str] = [] + ) -> Tuple[LlmAgent, Optional[AsyncExitStack]]: + """Create an LLM agent from the agent data.""" + # Get custom tools from the configuration + custom_tools = [] + custom_tools = self.custom_tool_builder.build_tools(agent.config) + + # # Get MCP tools from the configuration + mcp_tools = [] + mcp_exit_stack = None + if agent.config.get("mcp_servers") or agent.config.get("custom_mcp_servers"): + try: + mcp_tools, mcp_exit_stack = await self.mcp_service.build_tools( + agent.config, self.db + ) + except Exception as e: + logger.error(f"Error building MCP tools: {e}") + # Continue without MCP tools + mcp_tools = [] + mcp_exit_stack = None + + # # Get agent tools + # agent_tools = await self._agent_tools_builder(agent) + + # Combine all tools + all_tools = custom_tools + mcp_tools + + if enabled_tools: + all_tools = [tool for tool in all_tools if tool.name in enabled_tools] + logger.info(f"Enabled tools enabled. 
Total tools: {len(all_tools)}") + + now = datetime.now() + current_datetime = now.strftime("%d/%m/%Y %H:%M") + current_day_of_week = now.strftime("%A") + current_date_iso = now.strftime("%Y-%m-%d") + current_time = now.strftime("%H:%M") + + # Substitute variables in the prompt + formatted_prompt = agent.instruction.format( + current_datetime=current_datetime, + current_day_of_week=current_day_of_week, + current_date_iso=current_date_iso, + current_time=current_time, + ) + + llm_agent = LlmAgent( + role=agent.role, + goal=agent.goal, + backstory=formatted_prompt, + llm=await self._create_llm(agent), + tools=all_tools, + verbose=True, + cache=True, + # memory=True, + ) + + return llm_agent, mcp_exit_stack + + async def _create_tasks( + self, agent: LlmAgent, tasks: List[AgentTask] = [] + ) -> List[Task]: + """Create tasks from the agent data.""" + tasks_list = [] + if tasks: + tasks_list.extend( + Task( + name=task.name, + description=task.description, + expected_output=task.expected_output, + agent=agent, + verbose=True, + ) + for task in tasks + ) + return tasks_list + + async def build_crew(self, agents: List[LlmAgent], tasks: List[Task] = []) -> Crew: + """Create a crew from the agent data.""" + return Crew( + agents=agents, + tasks=tasks, + process=Process.sequential, + verbose=True, + ) + + async def build_llm_agent( + self, root_agent, enabled_tools: List[str] = [] + ) -> Tuple[LlmAgent, Optional[AsyncExitStack]]: + """Build an LLM agent with its sub-agents.""" + logger.info("Creating LLM agent") + + try: + result = await self._create_llm_agent(root_agent, enabled_tools) + + if isinstance(result, tuple) and len(result) == 2: + return result + else: + return result, None + except Exception as e: + logger.error(f"Error in build_llm_agent: {e}") + raise + + async def build_agent( + self, root_agent, enabled_tools: List[str] = [] + ) -> Tuple[LlmAgent, Optional[AsyncExitStack]]: + """Build the appropriate agent based on the type of the root agent.""" + if 
root_agent.type == "llm": + agent, exit_stack = await self.build_llm_agent(root_agent, enabled_tools) + return agent, exit_stack + elif root_agent.type == "a2a": + raise ValueError("A2A agents are not supported yet") + # return await self.build_a2a_agent(root_agent) + elif root_agent.type == "workflow": + raise ValueError("Workflow agents are not supported yet") + # return await self.build_workflow_agent(root_agent) + elif root_agent.type == "task": + raise ValueError("Task agents are not supported yet") + # return await self.build_task_agent(root_agent) + else: + raise ValueError(f"Invalid agent type: {root_agent.type}") + # return await self.build_composite_agent(root_agent) diff --git a/src/services/crewai/agent_runner.py b/src/services/crewai/agent_runner.py new file mode 100644 index 00000000..e3855d16 --- /dev/null +++ b/src/services/crewai/agent_runner.py @@ -0,0 +1,595 @@ +""" +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: agent_runner.py │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. 
│ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. │ +└──────────────────────────────────────────────────────────────────────────────┘ +""" + +from crewai import Crew, Task, Agent as LlmAgent +from src.services.crewai.session_service import ( + CrewSessionService, + Event, + Content, + Part, + Session, +) +from src.services.crewai.agent_builder import AgentBuilder +from src.utils.logger import setup_logger +from src.core.exceptions import AgentNotFoundError, InternalServerError +from src.services.agent_service import get_agent +from sqlalchemy.orm import Session +from typing import Optional, AsyncGenerator +import asyncio +import json +from datetime import datetime +from src.utils.otel import get_tracer +from opentelemetry import trace +import base64 + +logger = setup_logger(__name__) + + +def extract_text_from_output(crew_output): + """Extract text from CrewOutput object.""" + if hasattr(crew_output, "raw") and crew_output.raw: + return crew_output.raw + elif hasattr(crew_output, "__str__"): + return str(crew_output) + + # Fallback if no text found + return "Unable to extract a valid response." 
+ + +async def run_agent( + agent_id: str, + external_id: str, + message: str, + session_service: CrewSessionService, + db: Session, + session_id: Optional[str] = None, + timeout: float = 60.0, + files: Optional[list] = None, +): + tracer = get_tracer() + with tracer.start_as_current_span( + "run_agent", + attributes={ + "agent_id": agent_id, + "external_id": external_id, + "session_id": session_id or f"{external_id}_{agent_id}", + "message": message, + "has_files": files is not None and len(files) > 0, + }, + ): + exit_stack = None + try: + logger.info( + f"Starting execution of agent {agent_id} for external_id {external_id}" + ) + logger.info(f"Received message: {message}") + + if files and len(files) > 0: + logger.info(f"Received {len(files)} files with message") + + get_root_agent = get_agent(db, agent_id) + logger.info( + f"Root agent found: {get_root_agent.name} (type: {get_root_agent.type})" + ) + + if get_root_agent is None: + raise AgentNotFoundError(f"Agent with ID {agent_id} not found") + + # Using the AgentBuilder to create the agent + agent_builder = AgentBuilder(db) + result = await agent_builder.build_agent(get_root_agent) + + # Check how the result is structured + if isinstance(result, tuple) and len(result) == 2: + root_agent, exit_stack = result + else: + # If the result is not a tuple of 2 elements + root_agent = result + exit_stack = None + logger.warning("build_agent did not return an exit_stack") + + # TODO: files should be processed here + + # Fetch session information + crew_session_id = f"{external_id}_{agent_id}" + if session_id is None: + session_id = crew_session_id + + logger.info(f"Searching session for external_id {external_id}") + try: + session = session_service.get_session( + agent_id=agent_id, + external_id=external_id, + session_id=crew_session_id, + ) + except Exception as e: + logger.warning(f"Error getting session: {str(e)}") + session = None + + if session is None: + logger.info(f"Creating new session for external_id 
{external_id}") + session = session_service.create_session( + agent_id=agent_id, + external_id=external_id, + session_id=crew_session_id, + ) + + # Add user message to session + session.events.append( + Event( + author="user", + content=Content(parts=[{"text": message}]), + timestamp=datetime.now().timestamp(), + ) + ) + + # Save session to database + session_service.save_session(session) + + # Build message history for context + conversation_history = [] + if session and session.events: + for event in session.events: + if event.author and event.content and event.content.parts: + for part in event.content.parts: + if isinstance(part, dict) and "text" in part: + role = "User" if event.author == "user" else "Assistant" + conversation_history.append(f"{role}: {part['text']}") + + # Build description with history as context + task_description = ( + f"Conversation history:\n" + "\n".join(conversation_history) + if conversation_history + else "" + ) + task_description += f"\n\nCurrent user message: {message}" + + task = Task( + name="resolve_user_request", + description=task_description, + expected_output="Response to the user request", + agent=root_agent, + verbose=True, + ) + + crew = await agent_builder.build_crew([root_agent], [task]) + + # Use normal kickoff or kickoff_async instead of kickoff_for_each + if hasattr(crew, "kickoff_async"): + crew_output = await crew.kickoff_async(inputs={"message": message}) + else: + loop = asyncio.get_event_loop() + crew_output = await loop.run_in_executor( + None, lambda: crew.kickoff(inputs={"message": message}) + ) + + # Extract response and add to session + final_text = extract_text_from_output(crew_output) + + # Add agent response as event in session + session.events.append( + Event( + author=get_root_agent.name, + content=Content(parts=[{"text": final_text}]), + timestamp=datetime.now().timestamp(), + ) + ) + + # Save session with new event + session_service.save_session(session) + + logger.info("Starting agent execution") + 
+ final_response_text = "No final response captured." + message_history = [] + + try: + response_queue = asyncio.Queue() + execution_completed = asyncio.Event() + + async def process_events(): + try: + # Log the result + logger.info(f"Crew output: {crew_output}") + + # Signal that execution is complete + execution_completed.set() + + # Extract text from CrewOutput object + final_text = "Unable to extract a valid response." + + if hasattr(crew_output, "raw") and crew_output.raw: + final_text = crew_output.raw + elif hasattr(crew_output, "__str__"): + final_text = str(crew_output) + + # If still empty or None, check crew artifacts + if not final_text or final_text.strip() == "": + # Try to get from agent messages + if hasattr(root_agent, "messages") and root_agent.messages: + # Get the last message from the agent + for msg in reversed(root_agent.messages): + if hasattr(msg, "content") and msg.content: + final_text = msg.content + break + + # If still empty, use a fallback + if not final_text or final_text.strip() == "": + final_text = "The agent could not produce a valid response. Please try again with a different question." + + # Put the extracted text in the queue + await response_queue.put(final_text) + except Exception as e: + logger.error(f"Error in process_events: {str(e)}") + # Provide a more helpful error response + error_response = f"An error occurred during processing: {str(e)}\n\nIf you are trying to use external tools such as Brave Search, please make sure the connection is working properly." 
+ await response_queue.put(error_response) + execution_completed.set() + + task = asyncio.create_task(process_events()) + + try: + wait_task = asyncio.create_task(execution_completed.wait()) + done, pending = await asyncio.wait({wait_task}, timeout=timeout) + + for p in pending: + p.cancel() + + if not execution_completed.is_set(): + logger.warning( + f"Agent execution timed out after {timeout} seconds" + ) + await response_queue.put( + "The response took too long and was interrupted." + ) + + final_response_text = await response_queue.get() + + except Exception as e: + logger.error(f"Error waiting for response: {str(e)}") + final_response_text = f"Error processing response: {str(e)}" + + # Add the session to memory after completion + # completed_session = session_service.get_session( + # app_name=agent_id, + # user_id=external_id, + # session_id=crew_session_id, + # ) + + # memory_service.add_session_to_memory(completed_session) + + # Cancel the processing task if it is still running + if not task.done(): + task.cancel() + try: + await task + except asyncio.CancelledError: + logger.info("Task cancelled successfully") + except Exception as e: + logger.error(f"Error cancelling task: {str(e)}") + + except Exception as e: + logger.error(f"Error processing request: {str(e)}") + raise InternalServerError(str(e)) from e + + logger.info("Agent execution completed successfully") + return { + "final_response": final_response_text, + "message_history": message_history, + } + except AgentNotFoundError as e: + logger.error(f"Error processing request: {str(e)}") + raise e + except Exception as e: + logger.error(f"Internal error processing request: {str(e)}", exc_info=True) + raise InternalServerError(str(e)) + finally: + # Clean up MCP connection - MUST be executed in the same task + if exit_stack: + logger.info("Closing MCP server connection...") + try: + if hasattr(exit_stack, "aclose"): + # If it's an AsyncExitStack + await exit_stack.aclose() + elif isinstance(exit_stack, 
list): + # If it's a list of adapters + for adapter in exit_stack: + if hasattr(adapter, "close"): + adapter.close() + except Exception as e: + logger.error(f"Error closing MCP connection: {e}") + # Do not raise the exception to not obscure the original error + + +def convert_sets(obj): + if isinstance(obj, set): + return list(obj) + elif isinstance(obj, dict): + return {k: convert_sets(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_sets(i) for i in obj] + else: + return obj + + +async def run_agent_stream( + agent_id: str, + external_id: str, + message: str, + db: Session, + session_id: Optional[str] = None, + files: Optional[list] = None, +) -> AsyncGenerator[str, None]: + tracer = get_tracer() + span = tracer.start_span( + "run_agent_stream", + attributes={ + "agent_id": agent_id, + "external_id": external_id, + "session_id": session_id or f"{external_id}_{agent_id}", + "message": message, + "has_files": files is not None and len(files) > 0, + }, + ) + exit_stack = None + try: + with trace.use_span(span, end_on_exit=True): + try: + logger.info( + f"Starting streaming execution of agent {agent_id} for external_id {external_id}" + ) + logger.info(f"Received message: {message}") + + if files and len(files) > 0: + logger.info(f"Received {len(files)} files with message") + + get_root_agent = get_agent(db, agent_id) + logger.info( + f"Root agent found: {get_root_agent.name} (type: {get_root_agent.type})" + ) + + if get_root_agent is None: + raise AgentNotFoundError(f"Agent with ID {agent_id} not found") + + # Using the AgentBuilder to create the agent + agent_builder = AgentBuilder(db) + result = await agent_builder.build_agent(get_root_agent) + + # Check how the result is structured + if isinstance(result, tuple) and len(result) == 2: + root_agent, exit_stack = result + else: + # If the result is not a tuple of 2 elements + root_agent = result + exit_stack = None + logger.warning("build_agent did not return an exit_stack") + + # TODO: 
files should be processed here + + # Fetch session history if available + session_id = f"{external_id}_{agent_id}" + + # Create an instance of the session service + try: + from src.config.settings import get_settings + + settings = get_settings() + db_url = settings.DATABASE_URL + except ImportError: + # Fallback to local SQLite if cannot import settings + db_url = "sqlite:///data/crew_sessions.db" + + session_service = CrewSessionService(db_url) + + try: + # Try to get existing session + session = session_service.get_session( + agent_id=agent_id, + external_id=external_id, + session_id=session_id, + ) + except Exception as e: + logger.warning(f"Could not load session: {e}") + session = None + + # Build message history for context + conversation_history = [] + + if session and session.events: + for event in session.events: + if event.author and event.content and event.content.parts: + for part in event.content.parts: + if isinstance(part, dict) and "text" in part: + role = ( + "User" + if event.author == "user" + else "Assistant" + ) + conversation_history.append( + f"{role}: {part['text']}" + ) + + # Build description with history + task_description = ( + f"Conversation history:\n" + "\n".join(conversation_history) + if conversation_history + else "" + ) + task_description += f"\n\nCurrent user message: {message}" + + task = Task( + name="resolve_user_request", + description=task_description, + expected_output="Response to the user request", + agent=root_agent, + verbose=True, + ) + + crew = await agent_builder.build_crew([root_agent], [task]) + + logger.info("Starting agent streaming execution") + + try: + # Check if we can process messages with kickoff_for_each + if hasattr(crew, "kickoff_for_each"): + # Create input with current message + inputs = [{"message": message}] + logger.info( + f"Using kickoff_for_each for streaming with {len(inputs)} input(s)" + ) + + # Execute kickoff_for_each + results = crew.kickoff_for_each(inputs=inputs) + + # Print results and 
save to session + for i, result in enumerate(results): + logger.info(f"Result of event {i+1}: {result}") + + # If we have a session, save the response to it + if session: + # Add agent response as event + session.events.append( + Event( + author="agent", + content=Content(parts=[{"text": result}]), + timestamp=datetime.now().timestamp(), + ) + ) + + # Save current session with new message + if session: + # Also add user message if it doesn't exist yet + if not any( + e.author == "user" + and any( + p.get("text") == message for p in e.content.parts + ) + for e in session.events + if e.content and e.content.parts + ): + session.events.append( + Event( + author="user", + content=Content(parts=[{"text": message}]), + timestamp=datetime.now().timestamp(), + ) + ) + # Save session + try: + session_service.save_session(session) + logger.info(f"Session saved successfully: {session_id}") + except Exception as e: + logger.error(f"Error saving session: {e}") + + # Use last result as final output + crew_output = results[-1] if results else None + else: + # CrewAI kickoff method is synchronous, fallback if kickoff_for_each not available + logger.info( + "kickoff_for_each not available, using standard kickoff for streaming" + ) + crew_output = crew.kickoff() + + logger.info(f"Crew output: {crew_output}") + + # Extract the actual text content + if hasattr(crew_output, "raw") and crew_output.raw: + final_output = crew_output.raw + elif hasattr(crew_output, "__str__"): + final_output = str(crew_output) + else: + final_output = "Could not extract text from response" + + # Save response to session (for fallback case of normal kickoff) + if session and not hasattr(crew, "kickoff_for_each"): + # Add agent response + session.events.append( + Event( + author="agent", + content=Content(parts=[{"text": final_output}]), + timestamp=datetime.now().timestamp(), + ) + ) + + # Add user message if it doesn't exist yet + if not any( + e.author == "user" + and any(p.get("text") == message for p in 
e.content.parts) + for e in session.events + if e.content and e.content.parts + ): + session.events.append( + Event( + author="user", + content=Content(parts=[{"text": message}]), + timestamp=datetime.now().timestamp(), + ) + ) + + # Save session + try: + session_service.save_session(session) + logger.info( + f"Session saved successfully (method: kickoff): {session_id}" + ) + except Exception as e: + logger.error(f"Error saving session: {e}") + + yield json.dumps({"text": final_output}) + except Exception as e: + logger.error(f"Error processing request: {str(e)}") + raise InternalServerError(str(e)) from e + finally: + # Clean up MCP connection + if exit_stack: + logger.info("Closing MCP server connection...") + try: + if hasattr(exit_stack, "aclose"): + # If it's an AsyncExitStack + await exit_stack.aclose() + elif isinstance(exit_stack, list): + # If it's a list of adapters + for adapter in exit_stack: + if hasattr(adapter, "close"): + adapter.close() + except Exception as e: + logger.error(f"Error closing MCP connection: {e}") + # Do not raise the exception to not obscure the original error + + logger.info("Agent streaming execution completed successfully") + except AgentNotFoundError as e: + logger.error(f"Error processing request: {str(e)}") + raise InternalServerError(str(e)) from e + except Exception as e: + logger.error( + f"Internal error processing request: {str(e)}", exc_info=True + ) + raise InternalServerError(str(e)) + finally: + span.end() diff --git a/src/services/crewai/custom_tool.py b/src/services/crewai/custom_tool.py new file mode 100644 index 00000000..0a5d15ec --- /dev/null +++ b/src/services/crewai/custom_tool.py @@ -0,0 +1,369 @@ +""" +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: custom_tool.py │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ 
+├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +""" + +from typing import Any, Dict, List, Type +from crewai.tools import BaseTool, tool +import requests +import json +from src.utils.logger import setup_logger +from pydantic import BaseModel, Field, create_model + +logger = setup_logger(__name__) + + +class CustomToolBuilder: + def __init__(self): + self.tools = [] + + def _create_http_tool(self, tool_config: Dict[str, Any]) -> BaseTool: + """Create an HTTP tool based on the provided configuration.""" + # Extract configuration parameters + name = tool_config["name"] + description = tool_config["description"] + endpoint = tool_config["endpoint"] + method = tool_config["method"] + headers = tool_config.get("headers", {}) + parameters = tool_config.get("parameters", {}) or {} + values = tool_config.get("values", {}) + error_handling = tool_config.get("error_handling", {}) + + path_params = parameters.get("path_params") or {} + query_params = parameters.get("query_params") or {} + body_params = parameters.get("body_params") or {} + + # Dynamic creation of the input schema for the tool + field_definitions = {} + + # Add all parameters as fields + for param in ( + list(path_params.keys()) + + list(query_params.keys()) + + list(body_params.keys()) + ): + # Default to string type for all parameters + field_definitions[param] = ( + str, + Field(..., description=f"Parameter {param}"), + ) + + # If there are no parameters but default values, use those as optional fields + if not field_definitions and values: + for param, value in values.items(): + param_type = type(value) + field_definitions[param] = ( + param_type, + Field(default=value, description=f"Parameter {param}"), + ) + + # Create dynamic input schema model in line with the documentation + tool_input_model = create_model( + f"{name.replace(' ', '')}Input", **field_definitions + ) + + # Create the HTTP tool using crewai's BaseTool class + # Following the pattern in the 
documentation + def create_http_tool_class(): + # Capture variables from outer scope + _name = name + _description = description + _tool_input_model = tool_input_model + + class HttpTool(BaseTool): + name: str = _name + description: str = _description + args_schema: Type[BaseModel] = _tool_input_model + + def _run(self, **kwargs): + """Execute the HTTP request and return the result.""" + try: + # Combines default values with provided values + all_values = {**values, **kwargs} + + # Substitutes placeholders in headers + processed_headers = { + k: v.format(**all_values) if isinstance(v, str) else v + for k, v in headers.items() + } + + # Processes path parameters + url = endpoint + for param, value in path_params.items(): + if param in all_values: + url = url.replace( + f"{{{param}}}", str(all_values[param]) + ) + + # Process query parameters + query_params_dict = {} + for param, value in query_params.items(): + if isinstance(value, list): + # If the value is a list, join with comma + query_params_dict[param] = ",".join(value) + elif param in all_values: + # If the parameter is in the values, use the provided value + query_params_dict[param] = all_values[param] + else: + # Otherwise, use the default value from the configuration + query_params_dict[param] = value + + # Adds default values to query params if they are not present + for param, value in values.items(): + if ( + param not in query_params_dict + and param not in path_params + ): + query_params_dict[param] = value + + body_data = {} + for param, param_config in body_params.items(): + if param in all_values: + body_data[param] = all_values[param] + + # Adds default values to body if they are not present + for param, value in values.items(): + if ( + param not in body_data + and param not in query_params_dict + and param not in path_params + ): + body_data[param] = value + + # Makes the HTTP request + response = requests.request( + method=method, + url=url, + headers=processed_headers, + 
params=query_params_dict, + json=body_data or None, + timeout=error_handling.get("timeout", 30), + ) + + if response.status_code >= 400: + raise requests.exceptions.HTTPError( + f"Error in the request: {response.status_code} - {response.text}" + ) + + # Always returns the response as a string + return json.dumps(response.json()) + + except Exception as e: + logger.error(f"Error executing tool {name}: {str(e)}") + return json.dumps( + error_handling.get( + "fallback_response", + {"error": "tool_execution_error", "message": str(e)}, + ) + ) + + return HttpTool + + # Create the tool instance + HttpToolClass = create_http_tool_class() + http_tool = HttpToolClass() + + # Add cache function following the documentation + def http_cache_function(arguments: dict, result: str) -> bool: + """Determines whether to cache the result based on arguments and result.""" + # Default implementation: cache all successful results + try: + # If the result is parseable JSON and not an error, cache it + result_obj = json.loads(result) + return not (isinstance(result_obj, dict) and "error" in result_obj) + except Exception: + # If result is not valid JSON, don't cache + return False + + # Assign the cache function to the tool + http_tool.cache_function = http_cache_function + + return http_tool + + def _create_http_tool_with_decorator(self, tool_config: Dict[str, Any]) -> Any: + """Create an HTTP tool using the tool decorator.""" + # Extract configuration parameters + name = tool_config["name"] + description = tool_config["description"] + endpoint = tool_config["endpoint"] + method = tool_config["method"] + headers = tool_config.get("headers", {}) + parameters = tool_config.get("parameters", {}) or {} + values = tool_config.get("values", {}) + error_handling = tool_config.get("error_handling", {}) + + path_params = parameters.get("path_params") or {} + query_params = parameters.get("query_params") or {} + body_params = parameters.get("body_params") or {} + + # Create function docstring with 
parameter documentation + param_list = ( + list(path_params.keys()) + + list(query_params.keys()) + + list(body_params.keys()) + ) + doc_params = [] + for param in param_list: + doc_params.append(f" {param}: Parameter description") + + docstring = ( + f"{description}\n\nParameters:\n" + + "\n".join(doc_params) + + "\n\nReturns:\n String containing the response in JSON format" + ) + + # Create the tool function using the decorator pattern in the documentation + @tool(name=name) + def http_tool(**kwargs): + """Tool function created dynamically.""" + try: + # Combines default values with provided values + all_values = {**values, **kwargs} + + # Substitutes placeholders in headers + processed_headers = { + k: v.format(**all_values) if isinstance(v, str) else v + for k, v in headers.items() + } + + # Processes path parameters + url = endpoint + for param, value in path_params.items(): + if param in all_values: + url = url.replace(f"{{{param}}}", str(all_values[param])) + + # Process query parameters + query_params_dict = {} + for param, value in query_params.items(): + if isinstance(value, list): + # If the value is a list, join with comma + query_params_dict[param] = ",".join(value) + elif param in all_values: + # If the parameter is in the values, use the provided value + query_params_dict[param] = all_values[param] + else: + # Otherwise, use the default value from the configuration + query_params_dict[param] = value + + # Adds default values to query params if they are not present + for param, value in values.items(): + if param not in query_params_dict and param not in path_params: + query_params_dict[param] = value + + body_data = {} + for param, param_config in body_params.items(): + if param in all_values: + body_data[param] = all_values[param] + + # Adds default values to body if they are not present + for param, value in values.items(): + if ( + param not in body_data + and param not in query_params_dict + and param not in path_params + ): + body_data[param] = 
value + + # Makes the HTTP request + response = requests.request( + method=method, + url=url, + headers=processed_headers, + params=query_params_dict, + json=body_data or None, + timeout=error_handling.get("timeout", 30), + ) + + if response.status_code >= 400: + raise requests.exceptions.HTTPError( + f"Error in the request: {response.status_code} - {response.text}" + ) + + # Always returns the response as a string + return json.dumps(response.json()) + + except Exception as e: + logger.error(f"Error executing tool {name}: {str(e)}") + return json.dumps( + error_handling.get( + "fallback_response", + {"error": "tool_execution_error", "message": str(e)}, + ) + ) + + # Replace the docstring + http_tool.__doc__ = docstring + + # Add cache function following the documentation + def http_cache_function(arguments: dict, result: str) -> bool: + """Determines whether to cache the result based on arguments and result.""" + # Default implementation: cache all successful results + try: + # If the result is parseable JSON and not an error, cache it + result_obj = json.loads(result) + return not (isinstance(result_obj, dict) and "error" in result_obj) + except Exception: + # If result is not valid JSON, don't cache + return False + + # Assign the cache function to the tool + http_tool.cache_function = http_cache_function + + return http_tool + + def build_tools(self, tools_config: Dict[str, Any]) -> List[BaseTool]: + """Builds a list of tools based on the provided configuration. 
Accepts both 'tools' and 'custom_tools' (with http_tools).""" + self.tools = [] + + # Find HTTP tools configuration in various possible locations + http_tools = [] + if tools_config.get("http_tools"): + http_tools = tools_config.get("http_tools", []) + elif tools_config.get("custom_tools") and tools_config["custom_tools"].get( + "http_tools" + ): + http_tools = tools_config["custom_tools"].get("http_tools", []) + elif ( + tools_config.get("tools") + and isinstance(tools_config["tools"], dict) + and tools_config["tools"].get("http_tools") + ): + http_tools = tools_config["tools"].get("http_tools", []) + + # Determine which implementation method to use (BaseTool or decorator) + use_decorator = tools_config.get("use_decorator", False) + + # Create tools for each HTTP tool configuration + for http_tool_config in http_tools: + if use_decorator: + self.tools.append( + self._create_http_tool_with_decorator(http_tool_config) + ) + else: + self.tools.append(self._create_http_tool(http_tool_config)) + + return self.tools diff --git a/src/services/crewai/mcp_service.py b/src/services/crewai/mcp_service.py new file mode 100644 index 00000000..f3f671d4 --- /dev/null +++ b/src/services/crewai/mcp_service.py @@ -0,0 +1,264 @@ +""" +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: mcp_service.py │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. 
│ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. │ +└──────────────────────────────────────────────────────────────────────────────┘ +""" + +from typing import Any, Dict, List, Optional, Tuple +from contextlib import ExitStack +import os +import sys +from src.utils.logger import setup_logger +from src.services.mcp_server_service import get_mcp_server +from sqlalchemy.orm import Session + +try: + from crewai_tools import MCPServerAdapter + from mcp import StdioServerParameters + + HAS_MCP_PACKAGES = True +except ImportError: + logger = setup_logger(__name__) + logger.error( + "MCP packages are not installed. 
Please install mcp and crewai-tools[mcp]" + ) + HAS_MCP_PACKAGES = False + +logger = setup_logger(__name__) + + +class MCPService: + def __init__(self): + self.tools = [] + self.exit_stack = ExitStack() + + def _connect_to_mcp_server( + self, server_config: Dict[str, Any] + ) -> Tuple[List[Any], Optional[ExitStack]]: + """Connect to a specific MCP server and return its tools.""" + if not HAS_MCP_PACKAGES: + logger.error("Cannot connect to MCP server: MCP packages not installed") + return [], None + + try: + # Determines the type of server (local or remote) + if "url" in server_config: + # Remote server (SSE) - Simplified approach using direct dictionary + sse_config = {"url": server_config["url"]} + + # Add headers if provided + if "headers" in server_config and server_config["headers"]: + sse_config["headers"] = server_config["headers"] + + # Create the MCPServerAdapter with the SSE configuration + mcp_adapter = MCPServerAdapter(sse_config) + else: + # Local server (Stdio) + command = server_config.get("command", "npx") + args = server_config.get("args", []) + + # Adds environment variables if specified + env = server_config.get("env", {}) + if env: + for key, value in env.items(): + os.environ[key] = value + + connection_params = StdioServerParameters( + command=command, args=args, env=env + ) + + # Create the MCPServerAdapter with the Stdio connection parameters + mcp_adapter = MCPServerAdapter(connection_params) + + # Get tools from the adapter + tools = mcp_adapter.tools + + # Return tools and the adapter (which serves as an exit stack) + return tools, mcp_adapter + + except Exception as e: + logger.error(f"Error connecting to MCP server: {e}") + return [], None + + def _filter_incompatible_tools(self, tools: List[Any]) -> List[Any]: + """Filters incompatible tools with the model.""" + problematic_tools = [ + "create_pull_request_review", # This tool causes the 400 INVALID_ARGUMENT error + ] + + filtered_tools = [] + removed_count = 0 + + for tool in tools: + 
if tool.name in problematic_tools: + logger.warning(f"Removing incompatible tool: {tool.name}") + removed_count += 1 + else: + filtered_tools.append(tool) + + if removed_count > 0: + logger.warning(f"Removed {removed_count} incompatible tools.") + + return filtered_tools + + def _filter_tools_by_agent( + self, tools: List[Any], agent_tools: List[str] + ) -> List[Any]: + """Filters tools compatible with the agent.""" + if not agent_tools: + return tools + + filtered_tools = [] + for tool in tools: + logger.info(f"Tool: {tool.name}") + if tool.name in agent_tools: + filtered_tools.append(tool) + return filtered_tools + + async def build_tools( + self, mcp_config: Dict[str, Any], db: Session + ) -> Tuple[List[Any], Any]: + """Builds a list of tools from multiple MCP servers.""" + if not HAS_MCP_PACKAGES: + logger.error("Cannot build MCP tools: MCP packages not installed") + return [], None + + self.tools = [] + self.exit_stack = ExitStack() + adapter_list = [] + + try: + mcp_servers = mcp_config.get("mcp_servers", []) + if mcp_servers is not None: + # Process each MCP server in the configuration + for server in mcp_servers: + try: + # Search for the MCP server in the database + mcp_server = get_mcp_server(db, server["id"]) + if not mcp_server: + logger.warning(f"MCP Server not found: {server['id']}") + continue + + # Prepares the server configuration + server_config = mcp_server.config_json.copy() + + # Replaces the environment variables in the config_json + if "env" in server_config and server_config["env"] is not None: + for key, value in server_config["env"].items(): + if value and value.startswith("env@@"): + env_key = value.replace("env@@", "") + if server.get("envs") and env_key in server.get( + "envs", {} + ): + server_config["env"][key] = server["envs"][ + env_key + ] + else: + logger.warning( + f"Environment variable '{env_key}' not provided for the MCP server {mcp_server.name}" + ) + continue + + logger.info(f"Connecting to MCP server: {mcp_server.name}") + 
tools, adapter = self._connect_to_mcp_server(server_config) + + if tools and adapter: + # Filters incompatible tools + filtered_tools = self._filter_incompatible_tools(tools) + + # Filters tools compatible with the agent + if agent_tools := server.get("tools", []): + filtered_tools = self._filter_tools_by_agent( + filtered_tools, agent_tools + ) + self.tools.extend(filtered_tools) + + # Add to the adapter list for cleanup later + adapter_list.append(adapter) + logger.info( + f"MCP Server {mcp_server.name} connected successfully. Added {len(filtered_tools)} tools." + ) + else: + logger.warning( + f"Failed to connect or no tools available for {mcp_server.name}" + ) + + except Exception as e: + logger.error( + f"Error connecting to MCP server {server.get('id', 'unknown')}: {e}" + ) + continue + + custom_mcp_servers = mcp_config.get("custom_mcp_servers", []) + if custom_mcp_servers is not None: + # Process custom MCP servers + for server in custom_mcp_servers: + if not server: + logger.warning( + "Empty server configuration found in custom_mcp_servers" + ) + continue + + try: + logger.info( + f"Connecting to custom MCP server: {server.get('url', 'unknown')}" + ) + tools, adapter = self._connect_to_mcp_server(server) + + if tools: + self.tools.extend(tools) + else: + logger.warning("No tools returned from custom MCP server") + continue + + if adapter: + adapter_list.append(adapter) + logger.info( + f"Custom MCP server connected successfully. Added {len(tools)} tools." + ) + else: + logger.warning("No adapter returned from custom MCP server") + except Exception as e: + logger.error( + f"Error connecting to custom MCP server {server.get('url', 'unknown')}: {e}" + ) + continue + + logger.info( + f"MCP Toolset created successfully. Total of {len(self.tools)} tools." 
+ ) + + except Exception as e: + # Ensure cleanup + for adapter in adapter_list: + if hasattr(adapter, "close"): + adapter.close() + logger.error(f"Fatal error connecting to MCP servers: {e}") + # Return empty lists in case of error + return [], None + + # Return the tools and the adapter list for cleanup + return self.tools, adapter_list diff --git a/src/services/crewai/session_service.py b/src/services/crewai/session_service.py new file mode 100644 index 00000000..6b8cec75 --- /dev/null +++ b/src/services/crewai/session_service.py @@ -0,0 +1,637 @@ +""" +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: session_service.py │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +""" + +from datetime import datetime +import json +import uuid +import base64 +import copy +from typing import Any, Dict, List, Optional, Union, Set + +from sqlalchemy import create_engine, Boolean, Text, ForeignKeyConstraint +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.ext.mutable import MutableDict +from sqlalchemy.orm import ( + sessionmaker, + relationship, + DeclarativeBase, + Mapped, + mapped_column, +) +from sqlalchemy.sql import func +from sqlalchemy.types import DateTime, PickleType, String +from sqlalchemy.dialects import postgresql +from sqlalchemy.types import TypeDecorator + +from pydantic import BaseModel + +from src.utils.logger import setup_logger + +logger = setup_logger(__name__) + + +class DynamicJSON(TypeDecorator): + """JSON type compatible with ADK that uses JSONB in PostgreSQL and TEXT with JSON + serialization for other databases.""" + + impl = Text # Default implementation is TEXT + + def load_dialect_impl(self, dialect): + if dialect.name == "postgresql": + return dialect.type_descriptor(postgresql.JSONB) + else: + return dialect.type_descriptor(Text) + + def process_bind_param(self, value, dialect): + if value is not None: + if dialect.name == "postgresql": + return value + else: + return json.dumps(value) + return value + + def process_result_value(self, value, dialect): + if value is not None: + if dialect.name == "postgresql": + return value + else: + return json.loads(value) + return value + + +class Base(DeclarativeBase): + """Base class for database tables.""" + + pass + + +class StorageSession(Base): + """Represents a session stored in the database, compatible with ADK.""" + + __tablename__ = "sessions" + + app_name: Mapped[str] = mapped_column(String, primary_key=True) + user_id: Mapped[str] = mapped_column(String, primary_key=True) + id: Mapped[str] = mapped_column( + String, primary_key=True, default=lambda: 
str(uuid.uuid4()) + ) + + state: Mapped[MutableDict[str, Any]] = mapped_column( + MutableDict.as_mutable(DynamicJSON), default={} + ) + + create_time: Mapped[DateTime] = mapped_column(DateTime(), default=func.now()) + update_time: Mapped[DateTime] = mapped_column( + DateTime(), default=func.now(), onupdate=func.now() + ) + + storage_events: Mapped[list["StorageEvent"]] = relationship( + "StorageEvent", + back_populates="storage_session", + ) + + def __repr__(self): + return f"" + + +class StorageEvent(Base): + """Represents an event stored in the database, compatible with ADK.""" + + __tablename__ = "events" + + id: Mapped[str] = mapped_column(String, primary_key=True) + app_name: Mapped[str] = mapped_column(String, primary_key=True) + user_id: Mapped[str] = mapped_column(String, primary_key=True) + session_id: Mapped[str] = mapped_column(String, primary_key=True) + + invocation_id: Mapped[str] = mapped_column(String) + author: Mapped[str] = mapped_column(String) + branch: Mapped[str] = mapped_column(String, nullable=True) + timestamp: Mapped[DateTime] = mapped_column(DateTime(), default=func.now()) + content: Mapped[dict[str, Any]] = mapped_column(DynamicJSON, nullable=True) + actions: Mapped[MutableDict[str, Any]] = mapped_column(PickleType) + + long_running_tool_ids_json: Mapped[Optional[str]] = mapped_column( + Text, nullable=True + ) + grounding_metadata: Mapped[dict[str, Any]] = mapped_column( + DynamicJSON, nullable=True + ) + partial: Mapped[bool] = mapped_column(Boolean, nullable=True) + turn_complete: Mapped[bool] = mapped_column(Boolean, nullable=True) + error_code: Mapped[str] = mapped_column(String, nullable=True) + error_message: Mapped[str] = mapped_column(String, nullable=True) + interrupted: Mapped[bool] = mapped_column(Boolean, nullable=True) + + storage_session: Mapped[StorageSession] = relationship( + "StorageSession", + back_populates="storage_events", + ) + + __table_args__ = ( + ForeignKeyConstraint( + ["app_name", "user_id", "session_id"], + 
["sessions.app_name", "sessions.user_id", "sessions.id"], + ondelete="CASCADE", + ), + ) + + @property + def long_running_tool_ids(self) -> set[str]: + return ( + set(json.loads(self.long_running_tool_ids_json)) + if self.long_running_tool_ids_json + else set() + ) + + @long_running_tool_ids.setter + def long_running_tool_ids(self, value: set[str]): + if value is None: + self.long_running_tool_ids_json = None + else: + self.long_running_tool_ids_json = json.dumps(list(value)) + + +class StorageAppState(Base): + """Represents an application state stored in the database, compatible with ADK.""" + + __tablename__ = "app_states" + + app_name: Mapped[str] = mapped_column(String, primary_key=True) + state: Mapped[MutableDict[str, Any]] = mapped_column( + MutableDict.as_mutable(DynamicJSON), default={} + ) + update_time: Mapped[DateTime] = mapped_column( + DateTime(), default=func.now(), onupdate=func.now() + ) + + +class StorageUserState(Base): + """Represents a user state stored in the database, compatible with ADK.""" + + __tablename__ = "user_states" + + app_name: Mapped[str] = mapped_column(String, primary_key=True) + user_id: Mapped[str] = mapped_column(String, primary_key=True) + state: Mapped[MutableDict[str, Any]] = mapped_column( + MutableDict.as_mutable(DynamicJSON), default={} + ) + update_time: Mapped[DateTime] = mapped_column( + DateTime(), default=func.now(), onupdate=func.now() + ) + + +# Pydantic model classes compatible with ADK +class State: + """Utility class for states, compatible with ADK.""" + + APP_PREFIX = "app:" + USER_PREFIX = "user:" + TEMP_PREFIX = "temp:" + + +class Content(BaseModel): + """Event content model, compatible with ADK.""" + + parts: List[Dict[str, Any]] + + +class Part(BaseModel): + """Content part model, compatible with ADK.""" + + text: Optional[str] = None + + +class Event(BaseModel): + """Event model, compatible with ADK.""" + + id: Optional[str] = None + author: str + branch: Optional[str] = None + invocation_id: 
Optional[str] = None + content: Optional[Content] = None + actions: Optional[Dict[str, Any]] = None + timestamp: Optional[float] = None + long_running_tool_ids: Optional[Set[str]] = None + grounding_metadata: Optional[Dict[str, Any]] = None + partial: Optional[bool] = None + turn_complete: Optional[bool] = None + error_code: Optional[str] = None + error_message: Optional[str] = None + interrupted: Optional[bool] = None + + +class Session(BaseModel): + """Session model, compatible with ADK.""" + + app_name: str + user_id: str + id: str + state: Dict[str, Any] = {} + events: List[Event] = [] + last_update_time: float + + class Config: + arbitrary_types_allowed = True + + +class CrewSessionService: + """Service for managing CrewAI agent sessions using ADK tables.""" + + def __init__(self, db_url: str): + """ + Initializes the session service. + + Args: + db_url: Database connection URL. + """ + try: + self.engine = create_engine(db_url) + except Exception as e: + raise ValueError(f"Failed to create database engine: {e}") + + # Create all tables + Base.metadata.create_all(self.engine) + self.Session = sessionmaker(bind=self.engine) + logger.info(f"CrewSessionService started with database at {db_url}") + + def create_session( + self, agent_id: str, external_id: str, session_id: Optional[str] = None + ) -> Session: + """ + Creates a new session for an agent. 
+ + Args: + agent_id: Agent ID (used as app_name in ADK) + external_id: External ID (used as user_id in ADK) + session_id: Optional session ID + + Returns: + Session: The created session + """ + session_id = session_id or str(uuid.uuid4()) + + with self.Session() as db_session: + # Check if app and user states already exist + storage_app_state = db_session.get(StorageAppState, (agent_id)) + storage_user_state = db_session.get( + StorageUserState, (agent_id, external_id) + ) + + app_state = storage_app_state.state if storage_app_state else {} + user_state = storage_user_state.state if storage_user_state else {} + + # Create states if they don't exist + if not storage_app_state: + storage_app_state = StorageAppState(app_name=agent_id, state={}) + db_session.add(storage_app_state) + + if not storage_user_state: + storage_user_state = StorageUserState( + app_name=agent_id, user_id=external_id, state={} + ) + db_session.add(storage_user_state) + + # Create session + storage_session = StorageSession( + app_name=agent_id, + user_id=external_id, + id=session_id, + state={}, + ) + db_session.add(storage_session) + db_session.commit() + + # Get timestamp + db_session.refresh(storage_session) + + # Merge states for response + merged_state = _merge_state(app_state, user_state, {}) + + # Create Session object for return + session = Session( + app_name=agent_id, + user_id=external_id, + id=session_id, + state=merged_state, + last_update_time=storage_session.update_time.timestamp(), + ) + + logger.info( + f"Session created: {session_id} for agent {agent_id} and user {external_id}" + ) + return session + + def get_session( + self, agent_id: str, external_id: str, session_id: str + ) -> Optional[Session]: + """ + Retrieves a session from the database. 
+ + Args: + agent_id: Agent ID + external_id: User ID + session_id: Session ID + + Returns: + Optional[Session]: The retrieved session or None if not found + """ + with self.Session() as db_session: + storage_session = db_session.get( + StorageSession, (agent_id, external_id, session_id) + ) + + if storage_session is None: + return None + + # Fetch session events + storage_events = ( + db_session.query(StorageEvent) + .filter(StorageEvent.session_id == storage_session.id) + .filter(StorageEvent.app_name == agent_id) + .filter(StorageEvent.user_id == external_id) + .all() + ) + + # Fetch states + storage_app_state = db_session.get(StorageAppState, (agent_id)) + storage_user_state = db_session.get( + StorageUserState, (agent_id, external_id) + ) + + app_state = storage_app_state.state if storage_app_state else {} + user_state = storage_user_state.state if storage_user_state else {} + session_state = storage_session.state + + # Merge states + merged_state = _merge_state(app_state, user_state, session_state) + + # Create session + session = Session( + app_name=agent_id, + user_id=external_id, + id=session_id, + state=merged_state, + last_update_time=storage_session.update_time.timestamp(), + ) + + # Add events + session.events = [ + Event( + id=e.id, + author=e.author, + branch=e.branch, + invocation_id=e.invocation_id, + content=_decode_content(e.content), + actions=e.actions, + timestamp=e.timestamp.timestamp(), + long_running_tool_ids=e.long_running_tool_ids, + grounding_metadata=e.grounding_metadata, + partial=e.partial, + turn_complete=e.turn_complete, + error_code=e.error_code, + error_message=e.error_message, + interrupted=e.interrupted, + ) + for e in storage_events + ] + + return session + + def save_session(self, session: Session) -> None: + """ + Saves a session to the database. 
+ + Args: + session: The session to save + """ + with self.Session() as db_session: + storage_session = db_session.get( + StorageSession, (session.app_name, session.user_id, session.id) + ) + + if not storage_session: + logger.error(f"Session not found: {session.id}") + return + + # Check states + storage_app_state = db_session.get(StorageAppState, (session.app_name)) + storage_user_state = db_session.get( + StorageUserState, (session.app_name, session.user_id) + ) + + # Extract state deltas + app_state_delta = {} + user_state_delta = {} + session_state_delta = {} + + # Apply state deltas + if storage_app_state and app_state_delta: + storage_app_state.state.update(app_state_delta) + + if storage_user_state and user_state_delta: + storage_user_state.state.update(user_state_delta) + + storage_session.state.update(session_state_delta) + + # Save new events + for event in session.events: + # Check if event already exists + existing_event = ( + ( + db_session.query(StorageEvent) + .filter(StorageEvent.id == event.id) + .filter(StorageEvent.app_name == session.app_name) + .filter(StorageEvent.user_id == session.user_id) + .filter(StorageEvent.session_id == session.id) + .first() + ) + if event.id + else None + ) + + if existing_event: + continue + + # Generate ID for the event if it doesn't exist + if not event.id: + event.id = str(uuid.uuid4()) + + # Create timestamp if it doesn't exist + if not event.timestamp: + event.timestamp = datetime.now().timestamp() + + # Create StorageEvent object + storage_event = StorageEvent( + id=event.id, + app_name=session.app_name, + user_id=session.user_id, + session_id=session.id, + invocation_id=event.invocation_id or str(uuid.uuid4()), + author=event.author, + branch=event.branch, + timestamp=datetime.fromtimestamp(event.timestamp), + actions=event.actions or {}, + long_running_tool_ids=event.long_running_tool_ids or set(), + grounding_metadata=event.grounding_metadata, + partial=event.partial, + turn_complete=event.turn_complete, + 
error_code=event.error_code, + error_message=event.error_message, + interrupted=event.interrupted, + ) + + # Encode content, if it exists + if event.content: + encoded_content = event.content.model_dump(exclude_none=True) + # Solution for serialization issues with multimedia content + for p in encoded_content.get("parts", []): + if "inline_data" in p: + p["inline_data"]["data"] = ( + base64.b64encode(p["inline_data"]["data"]).decode( + "utf-8" + ), + ) + storage_event.content = encoded_content + + db_session.add(storage_event) + + # Commit changes + db_session.commit() + + # Update timestamp in session + db_session.refresh(storage_session) + session.last_update_time = storage_session.update_time.timestamp() + + logger.info(f"Session saved: {session.id} with {len(session.events)} events") + + def list_sessions(self, agent_id: str, external_id: str) -> List[Dict[str, Any]]: + """ + Lists all sessions for an agent and user. + + Args: + agent_id: Agent ID + external_id: User ID + + Returns: + List[Dict[str, Any]]: List of summarized sessions + """ + with self.Session() as db_session: + sessions = ( + db_session.query(StorageSession) + .filter(StorageSession.app_name == agent_id) + .filter(StorageSession.user_id == external_id) + .all() + ) + + result = [] + for session in sessions: + result.append( + { + "app_name": session.app_name, + "user_id": session.user_id, + "id": session.id, + "created_at": session.create_time.isoformat(), + "updated_at": session.update_time.isoformat(), + } + ) + + return result + + def delete_session(self, agent_id: str, external_id: str, session_id: str) -> bool: + """ + Deletes a session from the database. 
+ + Args: + agent_id: Agent ID + external_id: User ID + session_id: Session ID + + Returns: + bool: True if the session was deleted, False otherwise + """ + from sqlalchemy import delete + + with self.Session() as db_session: + stmt = delete(StorageSession).where( + StorageSession.app_name == agent_id, + StorageSession.user_id == external_id, + StorageSession.id == session_id, + ) + result = db_session.execute(stmt) + db_session.commit() + + logger.info(f"Session deleted: {session_id}") + return result.rowcount > 0 + + +# Utility functions compatible with ADK + + +def _extract_state_delta(state: dict[str, Any]): + """Extracts state deltas between app, user, and session.""" + app_state_delta = {} + user_state_delta = {} + session_state_delta = {} + + if state: + for key in state.keys(): + if key.startswith(State.APP_PREFIX): + app_state_delta[key.removeprefix(State.APP_PREFIX)] = state[key] + elif key.startswith(State.USER_PREFIX): + user_state_delta[key.removeprefix(State.USER_PREFIX)] = state[key] + elif not key.startswith(State.TEMP_PREFIX): + session_state_delta[key] = state[key] + + return app_state_delta, user_state_delta, session_state_delta + + +def _merge_state(app_state, user_state, session_state): + """Merges app, user, and session states into a single object.""" + merged_state = copy.deepcopy(session_state) + + for key in app_state.keys(): + merged_state[State.APP_PREFIX + key] = app_state[key] + + for key in user_state.keys(): + merged_state[State.USER_PREFIX + key] = user_state[key] + + return merged_state + + +def _decode_content(content: Optional[dict[str, Any]]) -> Optional[Content]: + """Decodes event content potentially with binary data.""" + if not content: + return None + + for p in content.get("parts", []): + if "inline_data" in p and isinstance(p["inline_data"].get("data"), tuple): + p["inline_data"]["data"] = base64.b64decode(p["inline_data"]["data"][0]) + + return Content.model_validate(content) diff --git a/src/services/service_providers.py 
b/src/services/service_providers.py index 07d2d8d3..78d3acf2 100644 --- a/src/services/service_providers.py +++ b/src/services/service_providers.py @@ -27,12 +27,22 @@ └──────────────────────────────────────────────────────────────────────────────┘ """ -from src.config.settings import settings +import os from google.adk.artifacts.in_memory_artifact_service import InMemoryArtifactService from google.adk.sessions import DatabaseSessionService from google.adk.memory import InMemoryMemoryService +from dotenv import load_dotenv + +load_dotenv() + +from src.services.crewai.session_service import CrewSessionService + +if os.getenv("AI_ENGINE") == "crewai": + session_service = CrewSessionService(db_url=os.getenv("POSTGRES_CONNECTION_STRING")) +else: + session_service = DatabaseSessionService( + db_url=os.getenv("POSTGRES_CONNECTION_STRING") + ) -# Initialize service instances -session_service = DatabaseSessionService(db_url=settings.POSTGRES_CONNECTION_STRING) artifacts_service = InMemoryArtifactService() memory_service = InMemoryMemoryService() From 482c1693d1c6a2bb0ba47b59e5d30391ee512717 Mon Sep 17 00:00:00 2001 From: Davidson Gomes Date: Mon, 19 May 2025 15:34:42 -0300 Subject: [PATCH 11/14] feat(env): add AI engine configuration option to .env.example and update README for improved clarity --- .env.example | 3 + README.md | 864 ++++----------------------------------------------- 2 files changed, 57 insertions(+), 810 deletions(-) diff --git a/.env.example b/.env.example index 9f9c8c15..065cfeca 100644 --- a/.env.example +++ b/.env.example @@ -6,6 +6,9 @@ API_URL="http://localhost:8000" ORGANIZATION_NAME="Evo AI" ORGANIZATION_URL="https://evoai.evoapicloud.com" +# AI Engine configuration: "adk" or "crewai" +AI_ENGINE="adk" + # Database settings POSTGRES_CONNECTION_STRING="postgresql://postgres:root@localhost:5432/evo_ai" diff --git a/README.md b/README.md index 87bac869..2b89d624 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,18 @@ -# Evo AI - AI Agents Platform +

Evo AI - AI Agents Platform

+ +
+ +[![Whatsapp Group](https://img.shields.io/badge/Group-WhatsApp-%2322BC18)](https://evolution-api.com/whatsapp) +[![Discord Community](https://img.shields.io/badge/Discord-Community-blue)](https://evolution-api.com/discord) +[![Postman Collection](https://img.shields.io/badge/Postman-Collection-orange)](https://evolution-api.com/postman) +[![Documentation](https://img.shields.io/badge/Documentation-Official-green)](https://doc.evolution-api.com) +[![License](https://img.shields.io/badge/license-Apache--2.0-blue)](./LICENSE) +[![Support](https://img.shields.io/badge/Donation-picpay-green)](https://app.picpay.com/user/davidsongomes1998) +[![Sponsors](https://img.shields.io/badge/Github-sponsor-orange)](https://github.com/sponsors/EvolutionAPI) + +
+ +## Evo AI - AI Agents Platform Evo AI is an open-source platform for creating and managing AI agents, enabling integration with different AI models and services. @@ -8,318 +22,47 @@ The Evo AI platform allows: - Creation and management of AI agents - Integration with different language models -- Client management -- MCP server configuration +- Client management and MCP server configuration - Custom tools management -- **[Google Agent Development Kit (ADK)](https://google.github.io/adk-docs/)**: Base framework for agent development, providing support for LLM Agents, Sequential Agents, Loop Agents, Parallel Agents and Custom Agents +- **[Google Agent Development Kit (ADK)](https://google.github.io/adk-docs/)**: Base framework for agent development +- **[CrewAI Support](https://github.com/crewAI/crewAI)**: Alternative framework for agent development (in development) - JWT authentication with email verification -- **[Agent 2 Agent (A2A) Protocol Support](https://developers.googleblog.com/en/a2a-a-new-era-of-agent-interoperability/)**: Interoperability between AI agents following Google's A2A specification -- **[Workflow Agent with LangGraph](https://www.langchain.com/langgraph)**: Building complex agent workflows with LangGraph and ReactFlow -- **Secure API Key Management**: Encrypted storage of API keys with Fernet encryption +- **[Agent 2 Agent (A2A) Protocol Support](https://developers.googleblog.com/en/a2a-a-new-era-of-agent-interoperability/)**: Interoperability between AI agents +- **[Workflow Agent with LangGraph](https://www.langchain.com/langgraph)**: Building complex agent workflows +- **Secure API Key Management**: Encrypted storage of API keys - **Agent Organization**: Folder structure for organizing agents by categories -## 🤖 Agent Types and Creation +## 🤖 Agent Types -Evo AI supports different types of agents that can be flexibly combined to create complex solutions: +Evo AI supports different types of agents that can be flexibly combined: ### 1. 
LLM Agent (Language Model) Agent based on language models like GPT-4, Claude, etc. Can be configured with tools, MCP servers, and sub-agents. -```json -{ - "client_id": "{{client_id}}", - "name": "personal_assistant", - "description": "Specialized personal assistant", - "role": "Personal Assistant", - "goal": "Help users with daily tasks and provide relevant information", - "type": "llm", - "model": "gpt-4", - "api_key_id": "stored-api-key-uuid", - "folder_id": "folder_id (optional)", - "instruction": "Detailed instructions for agent behavior", - "config": { - "tools": [ - { - "id": "tool-uuid", - "envs": { - "API_KEY": "tool-api-key", - "ENDPOINT": "http://localhost:8000" - } - } - ], - "mcp_servers": [ - { - "id": "server-uuid", - "envs": { - "API_KEY": "server-api-key", - "ENDPOINT": "http://localhost:8001" - }, - "tools": ["tool_name1", "tool_name2"] - } - ], - "custom_tools": { - "http_tools": [] - }, - "sub_agents": ["sub-agent-uuid"] - } -} -``` - ### 2. A2A Agent (Agent-to-Agent) Agent that implements Google's A2A protocol for agent interoperability. -```json -{ - "client_id": "{{client_id}}", - "type": "a2a", - "agent_card_url": "http://localhost:8001/api/v1/a2a/your-agent/.well-known/agent.json", - "folder_id": "folder_id (optional)", - "config": { - "sub_agents": ["sub-agent-uuid"] - } -} -``` - ### 3. Sequential Agent Executes a sequence of sub-agents in a specific order. -```json -{ - "client_id": "{{client_id}}", - "name": "processing_flow", - "type": "sequential", - "folder_id": "folder_id (optional)", - "config": { - "sub_agents": ["agent-uuid-1", "agent-uuid-2", "agent-uuid-3"] - } -} -``` - ### 4. Parallel Agent Executes multiple sub-agents simultaneously. -```json -{ - "client_id": "{{client_id}}", - "name": "parallel_processing", - "type": "parallel", - "folder_id": "folder_id (optional)", - "config": { - "sub_agents": ["agent-uuid-1", "agent-uuid-2"] - } -} -``` - ### 5. 
Loop Agent Executes sub-agents in a loop with a defined maximum number of iterations. -```json -{ - "client_id": "{{client_id}}", - "name": "loop_processing", - "type": "loop", - "folder_id": "folder_id (optional)", - "config": { - "sub_agents": ["sub-agent-uuid"], - "max_iterations": 5 - } -} -``` - ### 6. Workflow Agent -Executes sub-agents in a custom workflow defined by a graph structure. This agent type uses LangGraph for implementing complex agent workflows with conditional execution paths. - -```json -{ - "client_id": "{{client_id}}", - "name": "workflow_agent", - "type": "workflow", - "folder_id": "folder_id (optional)", - "config": { - "sub_agents": ["agent-uuid-1", "agent-uuid-2", "agent-uuid-3"], - "workflow": { - "nodes": [], - "edges": [] - } - } -} -``` - -The workflow structure is built using ReactFlow in the frontend, allowing visual creation and editing of complex agent workflows with nodes (representing agents or decision points) and edges (representing flow connections). +Executes sub-agents in a custom workflow defined by a graph structure using LangGraph. ### 7. Task Agent -Executes a specific task using a target agent. Task Agent provides a streamlined approach for structured task execution, where the agent_id specifies which agent will process the task, and the task description can include dynamic content placeholders. 
- -```json -{ - "client_id": "{{client_id}}", - "name": "web_search_task", - "type": "task", - "folder_id": "folder_id (optional)", - "config": { - "tasks": [ - { - "agent_id": "search-agent-uuid", - "description": "Search the web for information about {content}", - "expected_output": "Comprehensive search results with relevant information" - } - ], - "sub_agents": ["post-processing-agent-uuid"] - } -} -``` - -Key features of Task Agent: - -- Passes structured task instructions to the designated agent -- Supports variable content using {content} placeholder in the task description -- Provides clear task definition with instructions and expected output format -- Can execute sub-agents after the main task is completed -- Simplifies orchestration for single-focused task execution - -Task Agent is ideal for scenarios where you need to execute a specific, well-defined task with clear instructions and expectations. - -### Common Characteristics - -- All agent types can have sub-agents -- Sub-agents can be of any type -- Agents can be flexibly combined -- Type-specific configurations -- Support for custom tools and MCP servers - -### MCP Server Configuration - -Agents can be integrated with MCP (Model Context Protocol) servers for distributed processing: - -```json -{ - "config": { - "mcp_servers": [ - { - "id": "server-uuid", - "envs": { - "API_KEY": "server-api-key", - "ENDPOINT": "http://localhost:8001", - "MODEL_NAME": "gpt-4", - "TEMPERATURE": 0.7, - "MAX_TOKENS": 2000 - }, - "tools": ["tool_name1", "tool_name2"] - } - ] - } -} -``` - -Available configurations for MCP servers: - -- **id**: Unique MCP server identifier -- **envs**: Environment variables for configuration - - API_KEY: Server authentication key - - ENDPOINT: MCP server URL - - MODEL_NAME: Model name to be used - - TEMPERATURE: Text generation temperature (0.0 to 1.0) - - MAX_TOKENS: Maximum token limit per request - - Other server-specific variables -- **tools**: MCP server tool names for agent use - 
-### Agent Composition Examples - -Different types of agents can be combined to create complex processing flows: - -#### 1. Sequential Processing Pipeline - -```json -{ - "client_id": "{{client_id}}", - "name": "processing_pipeline", - "type": "sequential", - "config": { - "sub_agents": [ - "llm-analysis-agent-uuid", // LLM Agent for initial analysis - "a2a-translation-agent-uuid", // A2A Agent for translation - "llm-formatting-agent-uuid" // LLM Agent for final formatting - ] - } -} -``` - -#### 2. Parallel Processing with Aggregation - -```json -{ - "client_id": "{{client_id}}", - "name": "parallel_analysis", - "type": "sequential", - "config": { - "sub_agents": [ - { - "type": "parallel", - "config": { - "sub_agents": [ - "analysis-agent-uuid-1", - "analysis-agent-uuid-2", - "analysis-agent-uuid-3" - ] - } - }, - "aggregation-agent-uuid" // Agent for aggregating results - ] - } -} -``` - -#### 3. Multi-Agent Conversation System - -```json -{ - "client_id": "{{client_id}}", - "name": "conversation_system", - "type": "parallel", - "config": { - "sub_agents": [ - { - "type": "llm", - "name": "context_agent", - "model": "gpt-4", - "instruction": "Maintain conversation context" - }, - { - "type": "a2a", - "agent_card_url": "expert-agent-url" - }, - { - "type": "loop", - "config": { - "sub_agents": ["memory-agent-uuid"], - "max_iterations": 1 - } - } - ] - } -} -``` - -### API Creation - -For creating a new agent, use the endpoint: - -```http -POST /api/v1/agents -Content-Type: application/json -Authorization: Bearer your-token-jwt - -{ - // Configuration of the agent as per the examples above -} -``` +Executes a specific task using a target agent with structured task instructions. ## 🛠️ Technologies @@ -341,137 +84,32 @@ Authorization: Bearer your-token-jwt Evo AI platform natively supports integration with [Langfuse](https://langfuse.com/) for detailed tracing of agent executions, prompts, model responses, and tool calls, using the OpenTelemetry (OTel) standard. 
-### Why use Langfuse? - -- Visual dashboard for agent traces, prompts, and executions -- Detailed analytics for debugging and evaluating LLM apps -- Easy integration with Google ADK and other frameworks - -### How it works - -- Every agent execution (including streaming) is automatically traced via OpenTelemetry spans -- Data is sent to Langfuse, where it can be visualized and analyzed - ### How to configure 1. **Set environment variables in your `.env`:** ```env - LANGFUSE_PUBLIC_KEY="pk-lf-..." # Your Langfuse public key - LANGFUSE_SECRET_KEY="sk-lf-..." # Your Langfuse secret key - OTEL_EXPORTER_OTLP_ENDPOINT="https://cloud.langfuse.com/api/public/otel" # (or us.cloud... for US region) + LANGFUSE_PUBLIC_KEY="pk-lf-..." + LANGFUSE_SECRET_KEY="sk-lf-..." + OTEL_EXPORTER_OTLP_ENDPOINT="https://cloud.langfuse.com/api/public/otel" ``` - > **Attention:** Do not swap the keys! `pk-...` is public, `sk-...` is secret. - -2. **Automatic initialization** - - - Tracing is automatically initialized when the application starts (`src/main.py`). - - Agent execution functions are already instrumented with spans (`src/services/agent_runner.py`). - -3. **View in the Langfuse dashboard** +2. **View in the Langfuse dashboard** - Access your Langfuse dashboard to see real-time traces. -### Troubleshooting - -- **401 Error (Invalid credentials):** - - Check if the keys are correct and not swapped in your `.env`. - - Make sure the endpoint matches your region (EU or US). -- **Context error in async generator:** - - The code is already adjusted to avoid OpenTelemetry context issues in async generators. -- **Questions about integration:** - - See the [official Langfuse documentation - Google ADK](https://langfuse.com/docs/integrations/google-adk) - ## 🤖 Agent 2 Agent (A2A) Protocol Support -Evo AI implements the Google's Agent 2 Agent (A2A) protocol, enabling seamless communication and interoperability between AI agents. 
This implementation includes:
-
-### Key Features
-
-- **Standardized Communication**: Agents can communicate using a common protocol regardless of their underlying implementation
-- **Interoperability**: Support for agents built with different frameworks and technologies
-- **Well-Known Endpoints**: Standardized endpoints for agent discovery and interaction
-- **Task Management**: Support for task creation, execution, and status tracking
-- **State Management**: Tracking of agent states and conversation history
-- **Authentication**: Secure API key-based authentication for agent interactions
-
-### Implementation Details
-
-- **Agent Card**: Each agent exposes a `.well-known/agent.json` endpoint with its capabilities and configuration
-- **Task Handling**: Support for task creation, execution, and status tracking
-- **Message Format**: Standardized message format for agent communication
-- **History Tracking**: Maintains conversation history between agents
-- **Artifact Management**: Support for handling different types of artifacts (text, files, etc.)
-
-### Example Usage
-
-```json
-// Agent Card Example
-{
-  "name": "My Agent",
-  "description": "A helpful AI assistant",
-  "url": "https://api.example.com/agents/123",
-  "capabilities": {
-    "streaming": false,
-    "pushNotifications": false,
-    "stateTransitionHistory": true
-  },
-  "authentication": {
-    "schemes": ["apiKey"],
-    "credentials": {
-      "in": "header",
-      "name": "x-api-key"
-    }
-  },
-  "skills": [
-    {
-      "id": "search",
-      "name": "Web Search",
-      "description": "Search the web for information"
-    }
-  ]
-}
-```
+Evo AI implements Google's Agent 2 Agent (A2A) protocol, enabling seamless communication and interoperability between AI agents.
 
 For more information about the A2A protocol, visit [Google's A2A Protocol Documentation](https://google.github.io/A2A/).
-## 📁 Project Structure - -``` -src/ -├── api/ # API endpoints -├── core/ # Core business logic -├── models/ # Data models -├── schemas/ # Pydantic schemas for validation -├── services/ # Business services -├── templates/ # Email templates -│ └── emails/ # Jinja2 email templates -├── utils/ # Utilities -└── config/ # Configurations -``` - ## 📋 Prerequisites -Before starting, make sure you have the following installed: - - **Python**: 3.10 or higher - **PostgreSQL**: 13.0 or higher - **Redis**: 6.0 or higher - **Git**: For version control -- **Make**: For running Makefile commands (usually pre-installed on Linux/Mac, for Windows use WSL or install via chocolatey) - -You'll also need the following accounts/API keys: - -- **SendGrid Account**: For email functionality - -## 📋 Requirements - -- Python 3.10+ -- PostgreSQL -- Redis -- Email provider: - - SendGrid Account (if using SendGrid email provider) - - SMTP Server (if using SMTP email provider) +- **Make**: For running Makefile commands ## 🔧 Installation @@ -482,54 +120,30 @@ git clone https://github.com/EvolutionAPI/evo-ai.git cd evo-ai ``` -2. Create a virtual environment: +2. Create a virtual environment and install dependencies: ```bash make venv source venv/bin/activate # Linux/Mac -# or -venv\Scripts\activate # Windows -``` - -3. Install dependencies: - -```bash -pip install -e . # For basic installation -# or -pip install -e ".[dev]" # For development dependencies -``` - -Or using the Makefile: - -```bash -make install # For basic installation -# or make install-dev # For development dependencies ``` -4. Set up environment variables: +3. Set up environment variables: ```bash cp .env.example .env # Edit the .env file with your settings ``` -5. Initialize the database and run migrations: +4. Initialize the database and seed data: ```bash make alembic-upgrade -``` - -6. 
Seed the database with initial data: - -```bash make seed-all ``` ## 🖥️ Frontend Installation -After installing Evo AI (the backend), you need to install the frontend to access the web interface: - 1. Clone the frontend repository: ```bash @@ -537,21 +151,15 @@ git clone https://github.com/EvolutionAPI/evo-ai-frontend.git cd evo-ai-frontend ``` -2. Follow the installation instructions in the frontend repository's README to set up and run the web interface. - -> The backend (API) and frontend are separate projects. Make sure both are running for full platform functionality. +2. Follow the installation instructions in the frontend repository's README. ## 🚀 Getting Started -After installation, follow these steps to set up your first agent: - -1. **Configure MCP Server**: Set up your Model Context Protocol server configuration first -2. **Create Client or Register**: Create a new client or register a user account -3. **Create Agents**: Set up the agents according to your needs (LLM, A2A, Sequential, Parallel, Loop, or Workflow) +After installation, start by configuring your MCP server, creating a client, and setting up your agents. 
### Configuration (.env file) -Configure your environment using the following key settings: +Key settings include: ```bash # Database settings @@ -560,138 +168,35 @@ POSTGRES_CONNECTION_STRING="postgresql://postgres:root@localhost:5432/evo_ai" # Redis settings REDIS_HOST="localhost" REDIS_PORT=6379 -REDIS_DB=0 -REDIS_PASSWORD="your-redis-password" + +# AI Engine configuration +AI_ENGINE="adk" # Options: "adk" (Google Agent Development Kit) or "crewai" (CrewAI framework) # JWT settings JWT_SECRET_KEY="your-jwt-secret-key" -JWT_ALGORITHM="HS256" -JWT_EXPIRATION_TIME=30 # In seconds # Email provider configuration EMAIL_PROVIDER="sendgrid" # Options: "sendgrid" or "smtp" -# SendGrid (if EMAIL_PROVIDER=sendgrid) -SENDGRID_API_KEY="your-sendgrid-api-key" -EMAIL_FROM="noreply@yourdomain.com" -APP_URL="https://yourdomain.com" - -# SMTP (if EMAIL_PROVIDER=smtp) -SMTP_FROM="noreply-smtp@yourdomain.com" -SMTP_USER="your-smtp-username" -SMTP_PASSWORD="your-smtp-password" -SMTP_HOST="your-smtp-host" -SMTP_PORT=587 -SMTP_USE_TLS=true -SMTP_USE_SSL=false - # Encryption for API keys ENCRYPTION_KEY="your-encryption-key" ``` -### Project Dependencies - -The project uses modern Python packaging standards with `pyproject.toml`. Key dependencies include: - -```toml -dependencies = [ - "fastapi==0.115.12", - "uvicorn==0.34.2", - "pydantic==2.11.3", - "sqlalchemy==2.0.40", - "psycopg2==2.9.10", - "alembic==1.15.2", - "redis==5.3.0", - "langgraph==0.4.1", - # ... other dependencies -] -``` - -For development, additional packages can be installed with: - -```bash -pip install -e ".[dev]" -``` - -This includes development tools like black, flake8, pytest, and more. +> **Note**: While Google ADK is fully supported, the CrewAI engine option is still under active development. For production environments, it's recommended to use the default "adk" engine. ## 🔐 Authentication -The API uses JWT (JSON Web Token) authentication. 
To access the endpoints, you need to: +The API uses JWT (JSON Web Token) authentication with: -1. Register a user or log in to obtain a JWT token -2. Include the JWT token in the `Authorization` header of all requests in the format `Bearer ` -3. Tokens expire after a configured period (default: 30 minutes) - -### Authentication Flow - -1. **User Registration**: - -```http -POST /api/v1/auth/register -``` - -2. **Email Verification**: - An email will be sent containing a verification link. - -3. **Login**: - -```http -POST /api/v1/auth/login -``` - -Returns a JWT token to be used in requests. - -4. **Password Recovery (if needed)**: - -```http -POST /api/v1/auth/forgot-password -POST /api/v1/auth/reset-password -``` - -5. **Recover logged user data**: - -```http -POST /api/v1/auth/me -``` - -### Example Usage with curl: - -```bash -# Login -curl -X POST "http://localhost:8000/api/v1/auth/login" \ - -H "Content-Type: application/json" \ - -d '{"email": "your-email@example.com", "password": "your-password"}' - -# Use received token -curl -X GET "http://localhost:8000/api/v1/clients/" \ - -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." 
-``` - -### Access Control - -- Regular users (associated with a client) only have access to their client's resources -- Admin users have access to all resources -- Certain operations (such as creating MCP servers) are restricted to administrators only -- Account lockout mechanism after multiple failed login attempts for enhanced security - -## 📧 Email Templates - -The platform uses Jinja2 templates for email rendering with a unified design system: - -- **Base Template**: All emails extend a common base template for consistent styling -- **Verification Email**: Sent when users register to verify their email address -- **Password Reset**: Sent when users request a password reset -- **Welcome Email**: Sent after email verification to guide new users -- **Account Locked**: Security alert when an account is locked due to multiple failed login attempts - -All email templates feature responsive design, clear call-to-action buttons, and fallback mechanisms. +- User registration and email verification +- Login to obtain JWT tokens +- Password recovery flow +- Account lockout after multiple failed login attempts ## 🚀 Running the Project ```bash make run # For development with automatic reload -# or make run-prod # For production with multiple workers ``` @@ -701,260 +206,28 @@ The API will be available at `http://localhost:8000` ```bash # Database migrations -make init # Initialize Alembic +make alembic-upgrade # Update database to latest version make alembic-revision message="description" # Create new migration -make alembic-upgrade # Update database to latest version (use to execute existing migrations) -make alembic-downgrade # Revert latest migration -make alembic-migrate message="description" # Create and apply migration -make alembic-reset # Reset database # Seeders -make seed-admin # Create default admin -make seed-client # Create default client -make seed-mcp-servers # Create example MCP servers -make seed-tools # Create example tools make seed-all # Run all seeders 
# Code verification make lint # Verify code with flake8 make format # Format code with black -make clear-cache # Clear project cache ``` ## 🐳 Running with Docker -For quick setup and deployment, we provide Docker and Docker Compose configurations. - -### Prerequisites - -- Docker installed -- Docker Compose installed - -### Configuration - -1. Create and configure the `.env` file: - -```bash -cp .env.example .env -# Edit the .env file with your settings, especially: -# - POSTGRES_CONNECTION_STRING -# - REDIS_HOST (should be "redis" when using Docker) -# - JWT_SECRET_KEY -# - SENDGRID_API_KEY -``` - -2. Build the Docker image: +1. Configure the `.env` file +2. Start the services: ```bash make docker-build -``` - -3. Start the services (API, PostgreSQL, and Redis): - -```bash make docker-up -``` - -4. Apply migrations (first time only): - -```bash -docker-compose exec api python -m alembic upgrade head -``` - -5. Populate the database with initial data: - -```bash make docker-seed ``` -6. To check application logs: - -```bash -make docker-logs -``` - -7. 
To stop the services: - -```bash -make docker-down -``` - -### Available Services - -- **API**: http://localhost:8000 -- **API Documentation**: http://localhost:8000/docs -- **PostgreSQL**: localhost:5432 -- **Redis**: localhost:6379 - -### Persistent Volumes - -Docker Compose sets up persistent volumes for: - -- PostgreSQL data -- Redis data -- Application logs directory - -### Environment Variables - -The main environment variables used by the API container: - -- `POSTGRES_CONNECTION_STRING`: PostgreSQL connection string -- `REDIS_HOST`: Redis host (use "redis" when running with Docker) -- `JWT_SECRET_KEY`: Secret key for JWT token generation -- `EMAIL_PROVIDER`: Email provider to use ("sendgrid" or "smtp") -- `SENDGRID_API_KEY`: SendGrid API key (if using SendGrid) -- `EMAIL_FROM`: Email used as sender (for SendGrid) -- `SMTP_FROM`: Email used as sender (for SMTP) -- `SMTP_HOST`, `SMTP_PORT`, `SMTP_USER`, `SMTP_PASSWORD`: SMTP server configuration -- `SMTP_USE_TLS`, `SMTP_USE_SSL`: SMTP security settings -- `APP_URL`: Base URL of the application - -## 🔒 Secure API Key Management - -Evo AI implements a secure API key management system that protects sensitive credentials: - -- **Encrypted Storage**: API keys are encrypted using Fernet symmetric encryption before storage -- **Secure References**: Agents reference API keys by UUID (api_key_id) instead of storing raw keys -- **Centralized Management**: API keys can be created, updated, and rotated without changing agent configurations -- **Client Isolation**: API keys are scoped to specific clients for better security isolation - -### Encryption Configuration - -The encryption system uses a secure key defined in the `.env` file: - -```env -ENCRYPTION_KEY="your-secure-encryption-key" -``` - -If not provided, a secure key will be generated automatically at startup. 
- -### API Key Management - -API keys can be managed through dedicated endpoints: - -```http -# Create a new API key -POST /api/v1/agents/apikeys -Content-Type: application/json -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -{ - "client_id": "client-uuid", - "name": "My OpenAI Key", - "provider": "openai", - "key_value": "sk-actual-api-key-value" -} - -# List all API keys for a client -GET /api/v1/agents/apikeys -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -# Get a specific API key -GET /api/v1/agents/apikeys/{key_id} -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -# Update an API key -PUT /api/v1/agents/apikeys/{key_id} -Content-Type: application/json -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -{ - "name": "Updated Key Name", - "provider": "anthropic", - "key_value": "new-key-value", - "is_active": true -} - -# Delete an API key (soft delete) -DELETE /api/v1/agents/apikeys/{key_id} -Authorization: Bearer your-token-jwt -x-client-id: client-uuid -``` - -## 🤖 Agent Organization - -Agents can be organized into folders for better management: - -### Creating and Managing Folders - -```http -# Create a new folder -POST /api/v1/agents/folders -Content-Type: application/json -Authorization: Bearer your-token-jwt - -{ - "client_id": "client-uuid", - "name": "Marketing Agents", - "description": "Agents for content marketing tasks" -} - -# List all folders -GET /api/v1/agents/folders -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -# Get a specific folder -GET /api/v1/agents/folders/{folder_id} -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -# Update a folder -PUT /api/v1/agents/folders/{folder_id} -Content-Type: application/json -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -{ - "name": "Updated Folder Name", - "description": "Updated folder description" -} - -# Delete a folder -DELETE /api/v1/agents/folders/{folder_id} -Authorization: 
Bearer your-token-jwt -x-client-id: client-uuid - -# List agents in a folder -GET /api/v1/agents/folders/{folder_id}/agents -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -# Assign an agent to a folder -PUT /api/v1/agents/{agent_id}/folder -Content-Type: application/json -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -{ - "folder_id": "folder-uuid" -} - -# Remove an agent from any folder -PUT /api/v1/agents/{agent_id}/folder -Content-Type: application/json -Authorization: Bearer your-token-jwt -x-client-id: client-uuid - -{ - "folder_id": null -} -``` - -### Filtering Agents by Folder - -When listing agents, you can filter by folder: - -```http -GET /api/v1/agents?folder_id=folder-uuid -Authorization: Bearer your-token-jwt -x-client-id: client-uuid -``` - ## 📚 API Documentation The interactive API documentation is available at: @@ -962,45 +235,16 @@ The interactive API documentation is available at: - Swagger UI: `http://localhost:8000/docs` - ReDoc: `http://localhost:8000/redoc` -## 📊 Logs and Audit +## ⭐ Star Us on GitHub -- Logs are stored in the `logs/` directory with the following format: - - `{logger_name}_{date}.log` -- The system maintains audit logs for important administrative actions -- Each action is recorded with information such as user, IP, date/time, and details +If you find EvoAI useful, please consider giving us a star! Your support helps us grow our community and continue improving the product. + +[![Star History Chart](https://api.star-history.com/svg?repos=EvolutionAPI/evo-ai&type=Date)](https://www.star-history.com/#EvolutionAPI/evo-ai&Date) ## 🤝 Contributing -We welcome contributions from the community! Here's how you can help: - -1. Fork the project -2. Create a feature branch (`git checkout -b feature/AmazingFeature`) -3. Make your changes and add tests if possible -4. Run tests and make sure they pass -5. Commit your changes following conventional commits format (`feat: add amazing feature`) -6. 
Push to the branch (`git push origin feature/AmazingFeature`) -7. Open a Pull Request - -Please read our [Contributing Guidelines](CONTRIBUTING.md) for more details. +We welcome contributions from the community! Please read our [Contributing Guidelines](CONTRIBUTING.md) for more details. ## 📄 License This project is licensed under the [Apache License 2.0](./LICENSE). - -The use of the name, logo, or trademark "Evolution API" is protected and not automatically granted by the license. See section 6 (Trademarks) of the license for details about trademark usage. - -## 📊 Stargazers - -[![Stargazers repo roster for @EvolutionAPI/evo-ai](https://reporoster.com/stars/EvolutionAPI/evo-ai)](https://github.com/EvolutionAPI/evo-ai/stargazers) - -## 🔄 Forks - -[![Forkers repo roster for @EvolutionAPI/evo-ai](https://reporoster.com/forks/EvolutionAPI/evo-ai)](https://github.com/EvolutionAPI/evo-ai/network/members) - -## 🙏 Acknowledgments - -- [FastAPI](https://fastapi.tiangolo.com/) -- [SQLAlchemy](https://www.sqlalchemy.org/) -- [Google ADK](https://github.com/google/adk) -- [LangGraph](https://github.com/langchain-ai/langgraph) -- [ReactFlow](https://reactflow.dev/) From 956d16a854aef845d04fe9752cd10553a135e789 Mon Sep 17 00:00:00 2001 From: Davidson Gomes Date: Sat, 24 May 2025 09:51:34 -0300 Subject: [PATCH 12/14] feat(frontend): add initial frontend structure with components, services, and assets --- frontend/.cursorrules | 120 + frontend/.env.example | 1 + frontend/.github/workflows/docker-image.yml | 48 + .../publish_docker_image_homolog.yml | 48 + .../workflows/publish_docker_image_latest.yml | 48 + frontend/.gitignore | 31 + frontend/CHANGELOG.md | 67 + frontend/Dockerfile | 63 + frontend/LICENSE | 201 + frontend/README.md | 237 + frontend/app/agents/AgentCard.tsx | 506 ++ frontend/app/agents/AgentList.tsx | 130 + frontend/app/agents/AgentSidebar.tsx | 186 + frontend/app/agents/AgentTypeSelector.tsx | 96 + frontend/app/agents/EmptyState.tsx | 107 + 
frontend/app/agents/SearchInput.tsx | 153 + frontend/app/agents/config/A2AAgentConfig.tsx | 73 + frontend/app/agents/config/LLMAgentConfig.tsx | 367 ++ .../agents/config/LoopAgentConfig copy.tsx | 133 + .../app/agents/config/ParallelAgentConfig.tsx | 125 + .../agents/config/SequentialAgentConfig.tsx | 120 + .../app/agents/config/TaskAgentConfig.tsx | 801 +++ .../app/agents/dialogs/AgentToolDialog.tsx | 184 + frontend/app/agents/dialogs/ApiKeysDialog.tsx | 445 ++ .../app/agents/dialogs/ConfirmationDialog.tsx | 93 + .../app/agents/dialogs/CustomMCPDialog.tsx | 237 + .../app/agents/dialogs/CustomToolDialog.tsx | 864 +++ frontend/app/agents/dialogs/FolderDialog.tsx | 158 + .../app/agents/dialogs/ImportAgentDialog.tsx | 239 + frontend/app/agents/dialogs/MCPDialog.tsx | 278 + .../app/agents/dialogs/MoveAgentDialog.tsx | 136 + .../app/agents/dialogs/ShareAgentDialog.tsx | 172 + frontend/app/agents/forms/AgentForm.tsx | 331 + frontend/app/agents/forms/BasicInfoTab.tsx | 243 + .../app/agents/forms/ConfigurationTab.tsx | 722 +++ frontend/app/agents/forms/SubAgentsTab.tsx | 246 + frontend/app/agents/page.tsx | 754 +++ frontend/app/agents/workflows/Canva.tsx | 689 ++ frontend/app/agents/workflows/ContextMenu.tsx | 118 + frontend/app/agents/workflows/HelperLines.tsx | 98 + frontend/app/agents/workflows/NodePanel.tsx | 273 + frontend/app/agents/workflows/canva.css | 188 + .../agents/workflows/edges/DefaultEdge.tsx | 119 + frontend/app/agents/workflows/edges/index.ts | 41 + .../app/agents/workflows/nodes/BaseNode.tsx | 166 + .../components/agent/AgentChatMessageList.tsx | 347 + .../nodes/components/agent/AgentForm.tsx | 631 ++ .../nodes/components/agent/AgentNode.tsx | 176 + .../components/agent/AgentTestChatModal.tsx | 470 ++ .../nodes/components/agent/styles.css | 68 + .../components/condition/ConditionDialog.tsx | 283 + .../components/condition/ConditionForm.tsx | 290 + .../components/condition/ConditionNode.tsx | 204 + .../nodes/components/delay/DelayForm.tsx | 256 + 
.../nodes/components/delay/DelayNode.tsx | 146 + .../nodes/components/message/MessageForm.tsx | 292 + .../nodes/components/message/MessageNode.tsx | 162 + .../nodes/components/start/StartNode.tsx | 100 + frontend/app/agents/workflows/nodes/index.ts | 109 + .../agents/workflows/nodes/nodeFunctions.ts | 65 + frontend/app/agents/workflows/nodes/style.css | 54 + frontend/app/agents/workflows/page.tsx | 218 + frontend/app/agents/workflows/utils.ts | 199 + .../app/chat/components/AgentInfoDialog.tsx | 497 ++ .../app/chat/components/AttachedFiles.tsx | 158 + .../app/chat/components/ChatContainer.tsx | 190 + frontend/app/chat/components/ChatInput.tsx | 270 + frontend/app/chat/components/ChatMessage.tsx | 375 ++ frontend/app/chat/components/FileUpload.tsx | 185 + .../chat/components/InlineDataAttachments.tsx | 182 + frontend/app/chat/components/SessionList.tsx | 248 + frontend/app/chat/page.tsx | 881 +++ frontend/app/client-layout.tsx | 52 + frontend/app/clients/loading.tsx | 31 + frontend/app/clients/page.tsx | 462 ++ .../components/A2AComplianceCard.tsx | 333 + .../documentation/components/CodeBlock.tsx | 78 + .../components/CodeExamplesSection.tsx | 317 + .../components/DocumentationSection.tsx | 588 ++ .../FrontendImplementationSection.tsx | 796 +++ .../documentation/components/HttpLabForm.tsx | 523 ++ .../documentation/components/LabSection.tsx | 185 + .../components/QuickStartTemplates.tsx | 179 + .../components/StreamLabForm.tsx | 366 ++ .../components/TechnicalDetailsSection.tsx | 470 ++ frontend/app/documentation/page.tsx | 1611 +++++ frontend/app/globals.css | 87 + frontend/app/layout.tsx | 73 + frontend/app/login/page.tsx | 556 ++ frontend/app/logout/page.tsx | 75 + frontend/app/mcp-servers/loading.tsx | 31 + frontend/app/mcp-servers/page.tsx | 940 +++ frontend/app/page.tsx | 33 + frontend/app/profile/page.tsx | 190 + frontend/app/security/page.tsx | 163 + frontend/app/security/reset-password/page.tsx | 198 + frontend/app/security/verify-email/page.tsx | 119 + 
frontend/app/shared-chat/AgentInfo.tsx | 302 + .../components/SharedChatPanel.tsx | 180 + .../components/SharedSessionList.tsx | 204 + frontend/app/shared-chat/page.tsx | 717 ++ frontend/components.json | 21 + frontend/components/ImpersonationBar.tsx | 129 + frontend/components/sidebar.tsx | 357 + frontend/components/theme-provider.tsx | 39 + frontend/components/toaster.tsx | 52 + frontend/components/ui/accordion.tsx | 58 + frontend/components/ui/alert-dialog.tsx | 141 + frontend/components/ui/alert.tsx | 59 + frontend/components/ui/aspect-ratio.tsx | 7 + frontend/components/ui/avatar.tsx | 50 + frontend/components/ui/badge.tsx | 29 + frontend/components/ui/breadcrumb.tsx | 115 + frontend/components/ui/button.tsx | 56 + frontend/components/ui/calendar.tsx | 66 + frontend/components/ui/card.tsx | 79 + frontend/components/ui/carousel.tsx | 262 + frontend/components/ui/chart.tsx | 365 ++ frontend/components/ui/checkbox.tsx | 28 + frontend/components/ui/collapsible.tsx | 11 + frontend/components/ui/command.tsx | 153 + frontend/components/ui/context-menu.tsx | 200 + frontend/components/ui/dialog.tsx | 122 + frontend/components/ui/drawer.tsx | 118 + frontend/components/ui/dropdown-menu.tsx | 200 + frontend/components/ui/form.tsx | 178 + frontend/components/ui/hover-card.tsx | 29 + frontend/components/ui/input-otp.tsx | 71 + frontend/components/ui/input.tsx | 22 + frontend/components/ui/label.tsx | 26 + frontend/components/ui/menubar.tsx | 236 + frontend/components/ui/navigation-menu.tsx | 128 + frontend/components/ui/pagination.tsx | 117 + frontend/components/ui/popover.tsx | 31 + frontend/components/ui/progress.tsx | 28 + frontend/components/ui/radio-group.tsx | 44 + frontend/components/ui/resizable.tsx | 45 + frontend/components/ui/scroll-area.tsx | 40 + frontend/components/ui/select.tsx | 160 + frontend/components/ui/separator.tsx | 31 + frontend/components/ui/sheet.tsx | 140 + frontend/components/ui/sidebar.tsx | 763 +++ frontend/components/ui/skeleton.tsx | 15 + 
frontend/components/ui/slider.tsx | 28 + frontend/components/ui/sonner.tsx | 31 + frontend/components/ui/switch.tsx | 29 + frontend/components/ui/table.tsx | 117 + frontend/components/ui/tabs.tsx | 55 + frontend/components/ui/textarea.tsx | 21 + frontend/components/ui/toast.tsx | 111 + frontend/components/ui/toaster.tsx | 35 + frontend/components/ui/toggle-group.tsx | 61 + frontend/components/ui/toggle.tsx | 45 + frontend/components/ui/tooltip.tsx | 30 + frontend/components/ui/use-mobile.tsx | 19 + frontend/components/ui/use-toast.ts | 179 + frontend/contexts/DnDContext.tsx | 68 + frontend/contexts/NodeDataContext.tsx | 118 + frontend/contexts/SourceClickContext.tsx | 65 + frontend/docker-compose.yml | 15 + frontend/docker-entrypoint.sh | 20 + frontend/docker_build.sh | 2 + frontend/hooks/use-agent-webSocket.ts | 165 + frontend/hooks/use-mobile.tsx | 47 + frontend/hooks/use-toast.ts | 223 + frontend/middleware.ts | 84 + frontend/next.config.mjs | 14 + frontend/package.json | 107 + frontend/pnpm-lock.yaml | 5765 +++++++++++++++++ frontend/postcss.config.mjs | 8 + frontend/public/favicon.svg | 4 + frontend/public/images/discord.webp | Bin 0 -> 21764 bytes frontend/public/images/email.webp | Bin 0 -> 9976 bytes frontend/public/images/evolution-ai-logo.png | Bin 0 -> 1513 bytes frontend/public/images/evolution.png | Bin 0 -> 3273 bytes frontend/public/images/facebook.png | Bin 0 -> 54771 bytes frontend/public/images/imessage.webp | Bin 0 -> 17574 bytes frontend/public/images/instagram.png | Bin 0 -> 1004190 bytes frontend/public/images/linkedin.webp | Bin 0 -> 22324 bytes frontend/public/images/mercadolivre.png | Bin 0 -> 7412 bytes frontend/public/images/sms.png | Bin 0 -> 14570 bytes frontend/public/images/svg/facebook.svg | 1 + frontend/public/images/svg/instagram.svg | 1 + frontend/public/images/svg/whatsapp.svg | 1 + frontend/public/images/telegram.webp | Bin 0 -> 26178 bytes frontend/public/images/threads.png | Bin 0 -> 23302 bytes 
frontend/public/images/tiktok.webp | Bin 0 -> 31110 bytes frontend/public/images/webchat.webp | Bin 0 -> 27504 bytes frontend/public/images/wechat.png | Bin 0 -> 125907 bytes frontend/public/images/whatsapp.png | Bin 0 -> 53944 bytes frontend/public/images/x-twitter.png | Bin 0 -> 6769 bytes frontend/public/images/youtube.webp | Bin 0 -> 1898 bytes frontend/public/placeholder-logo.png | Bin 0 -> 958 bytes frontend/public/placeholder-logo.svg | 1 + frontend/public/placeholder-user.jpg | Bin 0 -> 2615 bytes frontend/public/placeholder.jpg | Bin 0 -> 1596 bytes frontend/public/placeholder.svg | 1 + frontend/services/agentService.ts | 238 + frontend/services/api.ts | 122 + frontend/services/authService.ts | 56 + frontend/services/clientService.ts | 87 + frontend/services/mcpServerService.ts | 45 + frontend/services/sessionService.ts | 142 + frontend/styles/globals.css | 122 + frontend/tailwind.config.ts | 81 + frontend/tsconfig.json | 27 + frontend/types/agent.ts | 153 + frontend/types/aiModels.ts | 414 ++ frontend/types/auth.ts | 77 + frontend/types/mcpServer.ts | 60 + pyproject.toml | 1 + src/api/a2a_routes.py | 785 ++- src/schemas/a2a_enhanced_types.py | 612 ++ src/services/a2a_sdk_adapter.py | 397 ++ src/services/a2a_task_manager.py | 1069 --- src/services/adk/agent_builder.py | 2 +- src/services/adk/custom_agents/a2a_agent.py | 602 +- src/utils/a2a_enhanced_client.py | 746 +++ 218 files changed, 44036 insertions(+), 1464 deletions(-) create mode 100644 frontend/.cursorrules create mode 100644 frontend/.env.example create mode 100644 frontend/.github/workflows/docker-image.yml create mode 100644 frontend/.github/workflows/publish_docker_image_homolog.yml create mode 100644 frontend/.github/workflows/publish_docker_image_latest.yml create mode 100644 frontend/.gitignore create mode 100644 frontend/CHANGELOG.md create mode 100644 frontend/Dockerfile create mode 100644 frontend/LICENSE create mode 100644 frontend/README.md create mode 100644 
frontend/app/agents/AgentCard.tsx create mode 100644 frontend/app/agents/AgentList.tsx create mode 100644 frontend/app/agents/AgentSidebar.tsx create mode 100644 frontend/app/agents/AgentTypeSelector.tsx create mode 100644 frontend/app/agents/EmptyState.tsx create mode 100644 frontend/app/agents/SearchInput.tsx create mode 100644 frontend/app/agents/config/A2AAgentConfig.tsx create mode 100644 frontend/app/agents/config/LLMAgentConfig.tsx create mode 100644 frontend/app/agents/config/LoopAgentConfig copy.tsx create mode 100644 frontend/app/agents/config/ParallelAgentConfig.tsx create mode 100644 frontend/app/agents/config/SequentialAgentConfig.tsx create mode 100644 frontend/app/agents/config/TaskAgentConfig.tsx create mode 100644 frontend/app/agents/dialogs/AgentToolDialog.tsx create mode 100644 frontend/app/agents/dialogs/ApiKeysDialog.tsx create mode 100644 frontend/app/agents/dialogs/ConfirmationDialog.tsx create mode 100644 frontend/app/agents/dialogs/CustomMCPDialog.tsx create mode 100644 frontend/app/agents/dialogs/CustomToolDialog.tsx create mode 100644 frontend/app/agents/dialogs/FolderDialog.tsx create mode 100644 frontend/app/agents/dialogs/ImportAgentDialog.tsx create mode 100644 frontend/app/agents/dialogs/MCPDialog.tsx create mode 100644 frontend/app/agents/dialogs/MoveAgentDialog.tsx create mode 100644 frontend/app/agents/dialogs/ShareAgentDialog.tsx create mode 100644 frontend/app/agents/forms/AgentForm.tsx create mode 100644 frontend/app/agents/forms/BasicInfoTab.tsx create mode 100644 frontend/app/agents/forms/ConfigurationTab.tsx create mode 100644 frontend/app/agents/forms/SubAgentsTab.tsx create mode 100644 frontend/app/agents/page.tsx create mode 100644 frontend/app/agents/workflows/Canva.tsx create mode 100644 frontend/app/agents/workflows/ContextMenu.tsx create mode 100644 frontend/app/agents/workflows/HelperLines.tsx create mode 100644 frontend/app/agents/workflows/NodePanel.tsx create mode 100644 frontend/app/agents/workflows/canva.css 
create mode 100644 frontend/app/agents/workflows/edges/DefaultEdge.tsx create mode 100644 frontend/app/agents/workflows/edges/index.ts create mode 100644 frontend/app/agents/workflows/nodes/BaseNode.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/agent/AgentChatMessageList.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/agent/AgentForm.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/agent/AgentNode.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/agent/AgentTestChatModal.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/agent/styles.css create mode 100644 frontend/app/agents/workflows/nodes/components/condition/ConditionDialog.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/condition/ConditionForm.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/condition/ConditionNode.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/delay/DelayForm.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/delay/DelayNode.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/message/MessageForm.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/message/MessageNode.tsx create mode 100644 frontend/app/agents/workflows/nodes/components/start/StartNode.tsx create mode 100644 frontend/app/agents/workflows/nodes/index.ts create mode 100644 frontend/app/agents/workflows/nodes/nodeFunctions.ts create mode 100644 frontend/app/agents/workflows/nodes/style.css create mode 100644 frontend/app/agents/workflows/page.tsx create mode 100644 frontend/app/agents/workflows/utils.ts create mode 100644 frontend/app/chat/components/AgentInfoDialog.tsx create mode 100644 frontend/app/chat/components/AttachedFiles.tsx create mode 100644 frontend/app/chat/components/ChatContainer.tsx create mode 100644 frontend/app/chat/components/ChatInput.tsx create mode 100644 
frontend/app/chat/components/ChatMessage.tsx create mode 100644 frontend/app/chat/components/FileUpload.tsx create mode 100644 frontend/app/chat/components/InlineDataAttachments.tsx create mode 100644 frontend/app/chat/components/SessionList.tsx create mode 100644 frontend/app/chat/page.tsx create mode 100644 frontend/app/client-layout.tsx create mode 100644 frontend/app/clients/loading.tsx create mode 100644 frontend/app/clients/page.tsx create mode 100644 frontend/app/documentation/components/A2AComplianceCard.tsx create mode 100644 frontend/app/documentation/components/CodeBlock.tsx create mode 100644 frontend/app/documentation/components/CodeExamplesSection.tsx create mode 100644 frontend/app/documentation/components/DocumentationSection.tsx create mode 100644 frontend/app/documentation/components/FrontendImplementationSection.tsx create mode 100644 frontend/app/documentation/components/HttpLabForm.tsx create mode 100644 frontend/app/documentation/components/LabSection.tsx create mode 100644 frontend/app/documentation/components/QuickStartTemplates.tsx create mode 100644 frontend/app/documentation/components/StreamLabForm.tsx create mode 100644 frontend/app/documentation/components/TechnicalDetailsSection.tsx create mode 100644 frontend/app/documentation/page.tsx create mode 100644 frontend/app/globals.css create mode 100644 frontend/app/layout.tsx create mode 100644 frontend/app/login/page.tsx create mode 100644 frontend/app/logout/page.tsx create mode 100644 frontend/app/mcp-servers/loading.tsx create mode 100644 frontend/app/mcp-servers/page.tsx create mode 100644 frontend/app/page.tsx create mode 100644 frontend/app/profile/page.tsx create mode 100644 frontend/app/security/page.tsx create mode 100644 frontend/app/security/reset-password/page.tsx create mode 100644 frontend/app/security/verify-email/page.tsx create mode 100644 frontend/app/shared-chat/AgentInfo.tsx create mode 100644 frontend/app/shared-chat/components/SharedChatPanel.tsx create mode 100644 
frontend/app/shared-chat/components/SharedSessionList.tsx create mode 100644 frontend/app/shared-chat/page.tsx create mode 100644 frontend/components.json create mode 100644 frontend/components/ImpersonationBar.tsx create mode 100644 frontend/components/sidebar.tsx create mode 100644 frontend/components/theme-provider.tsx create mode 100644 frontend/components/toaster.tsx create mode 100644 frontend/components/ui/accordion.tsx create mode 100644 frontend/components/ui/alert-dialog.tsx create mode 100644 frontend/components/ui/alert.tsx create mode 100644 frontend/components/ui/aspect-ratio.tsx create mode 100644 frontend/components/ui/avatar.tsx create mode 100644 frontend/components/ui/badge.tsx create mode 100644 frontend/components/ui/breadcrumb.tsx create mode 100644 frontend/components/ui/button.tsx create mode 100644 frontend/components/ui/calendar.tsx create mode 100644 frontend/components/ui/card.tsx create mode 100644 frontend/components/ui/carousel.tsx create mode 100644 frontend/components/ui/chart.tsx create mode 100644 frontend/components/ui/checkbox.tsx create mode 100644 frontend/components/ui/collapsible.tsx create mode 100644 frontend/components/ui/command.tsx create mode 100644 frontend/components/ui/context-menu.tsx create mode 100644 frontend/components/ui/dialog.tsx create mode 100644 frontend/components/ui/drawer.tsx create mode 100644 frontend/components/ui/dropdown-menu.tsx create mode 100644 frontend/components/ui/form.tsx create mode 100644 frontend/components/ui/hover-card.tsx create mode 100644 frontend/components/ui/input-otp.tsx create mode 100644 frontend/components/ui/input.tsx create mode 100644 frontend/components/ui/label.tsx create mode 100644 frontend/components/ui/menubar.tsx create mode 100644 frontend/components/ui/navigation-menu.tsx create mode 100644 frontend/components/ui/pagination.tsx create mode 100644 frontend/components/ui/popover.tsx create mode 100644 frontend/components/ui/progress.tsx create mode 100644 
frontend/components/ui/radio-group.tsx create mode 100644 frontend/components/ui/resizable.tsx create mode 100644 frontend/components/ui/scroll-area.tsx create mode 100644 frontend/components/ui/select.tsx create mode 100644 frontend/components/ui/separator.tsx create mode 100644 frontend/components/ui/sheet.tsx create mode 100644 frontend/components/ui/sidebar.tsx create mode 100644 frontend/components/ui/skeleton.tsx create mode 100644 frontend/components/ui/slider.tsx create mode 100644 frontend/components/ui/sonner.tsx create mode 100644 frontend/components/ui/switch.tsx create mode 100644 frontend/components/ui/table.tsx create mode 100644 frontend/components/ui/tabs.tsx create mode 100644 frontend/components/ui/textarea.tsx create mode 100644 frontend/components/ui/toast.tsx create mode 100644 frontend/components/ui/toaster.tsx create mode 100644 frontend/components/ui/toggle-group.tsx create mode 100644 frontend/components/ui/toggle.tsx create mode 100644 frontend/components/ui/tooltip.tsx create mode 100644 frontend/components/ui/use-mobile.tsx create mode 100644 frontend/components/ui/use-toast.ts create mode 100644 frontend/contexts/DnDContext.tsx create mode 100644 frontend/contexts/NodeDataContext.tsx create mode 100644 frontend/contexts/SourceClickContext.tsx create mode 100644 frontend/docker-compose.yml create mode 100755 frontend/docker-entrypoint.sh create mode 100755 frontend/docker_build.sh create mode 100644 frontend/hooks/use-agent-webSocket.ts create mode 100644 frontend/hooks/use-mobile.tsx create mode 100644 frontend/hooks/use-toast.ts create mode 100644 frontend/middleware.ts create mode 100644 frontend/next.config.mjs create mode 100644 frontend/package.json create mode 100644 frontend/pnpm-lock.yaml create mode 100644 frontend/postcss.config.mjs create mode 100644 frontend/public/favicon.svg create mode 100644 frontend/public/images/discord.webp create mode 100644 frontend/public/images/email.webp create mode 100644 
frontend/public/images/evolution-ai-logo.png create mode 100644 frontend/public/images/evolution.png create mode 100644 frontend/public/images/facebook.png create mode 100644 frontend/public/images/imessage.webp create mode 100644 frontend/public/images/instagram.png create mode 100644 frontend/public/images/linkedin.webp create mode 100644 frontend/public/images/mercadolivre.png create mode 100644 frontend/public/images/sms.png create mode 100644 frontend/public/images/svg/facebook.svg create mode 100644 frontend/public/images/svg/instagram.svg create mode 100644 frontend/public/images/svg/whatsapp.svg create mode 100644 frontend/public/images/telegram.webp create mode 100644 frontend/public/images/threads.png create mode 100644 frontend/public/images/tiktok.webp create mode 100644 frontend/public/images/webchat.webp create mode 100644 frontend/public/images/wechat.png create mode 100644 frontend/public/images/whatsapp.png create mode 100644 frontend/public/images/x-twitter.png create mode 100644 frontend/public/images/youtube.webp create mode 100644 frontend/public/placeholder-logo.png create mode 100644 frontend/public/placeholder-logo.svg create mode 100644 frontend/public/placeholder-user.jpg create mode 100644 frontend/public/placeholder.jpg create mode 100644 frontend/public/placeholder.svg create mode 100644 frontend/services/agentService.ts create mode 100644 frontend/services/api.ts create mode 100644 frontend/services/authService.ts create mode 100644 frontend/services/clientService.ts create mode 100644 frontend/services/mcpServerService.ts create mode 100644 frontend/services/sessionService.ts create mode 100644 frontend/styles/globals.css create mode 100644 frontend/tailwind.config.ts create mode 100644 frontend/tsconfig.json create mode 100644 frontend/types/agent.ts create mode 100644 frontend/types/aiModels.ts create mode 100644 frontend/types/auth.ts create mode 100644 frontend/types/mcpServer.ts create mode 100644 
src/schemas/a2a_enhanced_types.py create mode 100644 src/services/a2a_sdk_adapter.py delete mode 100644 src/services/a2a_task_manager.py create mode 100644 src/utils/a2a_enhanced_client.py diff --git a/frontend/.cursorrules b/frontend/.cursorrules new file mode 100644 index 00000000..5d384dd9 --- /dev/null +++ b/frontend/.cursorrules @@ -0,0 +1,120 @@ +# Next.js Project Rules + +## Language +- All code, comments, documentation, commits, and PRs MUST be written in English. + +## Architecture + +### Folder Structure +- `/app`: App router pages and API routes + - Route-specific components should be placed in their respective route folders +- `/components`: Reusable UI components + - `/ui`: Shadcn UI components and their derivatives +- `/contexts`: React Context providers +- `/hooks`: Custom React hooks +- `/lib`: Utility functions and configuration +- `/public`: Static assets +- `/services`: API service functions +- `/styles`: Global styles +- `/types`: TypeScript type definitions + +### Component Guidelines +- Use functional components with TypeScript +- Use the `.tsx` extension for React components +- Follow a logical naming convention: + - Complex components: Use PascalCase and create folders with an index.tsx file + - Simple components: Single PascalCase named files + +### State Management +- Use React Context for global state +- Use React hooks for local state +- Avoid prop drilling more than 2 levels deep + +### API & Data Fetching +- Use API service modules in `/services` directory +- Implement proper error handling and loading states +- Use React Query or SWR for complex data fetching where appropriate + +## Development Patterns + +### Code Quality +- Maintain type safety - avoid using `any` type +- Write self-documenting code with descriptive names +- Keep components focused on a single responsibility +- Extract complex logic into custom hooks +- Follow DRY (Don't Repeat Yourself) principle + +### CSS & Styling +- Use Tailwind CSS for styling +- Use Shadcn UI 
components as base building blocks +- Maintain consistent spacing and sizing + +### Performance +- Avoid unnecessary re-renders +- Optimize images and assets +- Implement code splitting where appropriate +- Use dynamic imports for large components/pages + +### Testing +- Write tests for critical business logic +- Test components in isolation +- Implement end-to-end tests for critical user flows + +## Git Workflow + +### Branch Naming +- Features: `feature/short-description` +- Bugfixes: `fix/short-description` +- Hotfixes: `hotfix/short-description` +- Releases: `release/version` + +## Conventions +- Variable and function names in English +- Log and error messages in English +- Documentation in English +- User-facing content (emails, responses) in English +- Indentation with 4 spaces +- Maximum of 79 characters per line + +## Commit Rules +- Use Conventional Commits format for all commit messages +- Format: `(): ` +- Types: + - `feat`: A new feature + - `fix`: A bug fix + - `docs`: Documentation changes + - `style`: Changes that do not affect code meaning (formatting, etc.) 
+ - `refactor`: Code changes that neither fix a bug nor add a feature + - `perf`: Performance improvements + - `test`: Adding or modifying tests + - `chore`: Changes to build process or auxiliary tools +- Scope is optional and should be the module or component affected +- Description should be concise, in the imperative mood, and not capitalized +- Use body for more detailed explanations if needed +- Reference issues in the footer with `Fixes #123` or `Relates to #123` +- Examples: + - `feat(auth): add password reset functionality` + - `fix(api): correct validation error in client registration` + - `docs: update API documentation for new endpoints` + - `refactor(services): improve error handling in authentication` + +Format: `type(scope): subject` + +Examples: +- `feat(auth): add login form validation` +- `fix(api): resolve user data fetching issue` +- `docs(readme): update installation instructions` +- `style(components): format according to style guide` + +### Pull Requests +- Keep PRs focused on a single feature or fix +- Include descriptive titles and descriptions +- Reference related issues +- Request code reviews from appropriate team members +- Ensure CI checks pass before merging + +## Code Review Guidelines +- Focus on code quality, architecture, and maintainability +- Provide constructive feedback +- Address all review comments before merging +- Maintain a respectful and collaborative tone \ No newline at end of file diff --git a/frontend/.env.example b/frontend/.env.example new file mode 100644 index 00000000..f9feddb3 --- /dev/null +++ b/frontend/.env.example @@ -0,0 +1 @@ +NEXT_PUBLIC_API_URL=http://localhost:8000 \ No newline at end of file diff --git a/frontend/.github/workflows/docker-image.yml b/frontend/.github/workflows/docker-image.yml new file mode 100644 index 00000000..32994151 --- /dev/null +++ b/frontend/.github/workflows/docker-image.yml @@ -0,0 +1,48 @@ +name: Build Docker image + +on: + push: + tags: + - "*.*.*" + +jobs: + build_deploy: 
+ name: Build and Deploy + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: evoapicloud/evo-ai-frontend + tags: type=semver,pattern=v{{version}} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push + id: docker_build + uses: docker/build-push-action@v5 + with: + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file diff --git a/frontend/.github/workflows/publish_docker_image_homolog.yml b/frontend/.github/workflows/publish_docker_image_homolog.yml new file mode 100644 index 00000000..b292329b --- /dev/null +++ b/frontend/.github/workflows/publish_docker_image_homolog.yml @@ -0,0 +1,48 @@ +name: Build Docker image + +on: + push: + branches: + - develop + +jobs: + build_deploy: + name: Build and Deploy + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: evoapicloud/evo-ai-frontend + tags: homolog + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push + id: docker_build + uses: docker/build-push-action@v5 + with: + platforms: 
linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file diff --git a/frontend/.github/workflows/publish_docker_image_latest.yml b/frontend/.github/workflows/publish_docker_image_latest.yml new file mode 100644 index 00000000..deeb2aab --- /dev/null +++ b/frontend/.github/workflows/publish_docker_image_latest.yml @@ -0,0 +1,48 @@ +name: Build Docker image + +on: + push: + branches: + - main + +jobs: + build_deploy: + name: Build and Deploy + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: evoapicloud/evo-ai-frontend + tags: latest + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push + id: docker_build + uses: docker/build-push-action@v5 + with: + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file diff --git a/frontend/.gitignore b/frontend/.gitignore new file mode 100644 index 00000000..400e48ed --- /dev/null +++ b/frontend/.gitignore @@ -0,0 +1,31 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+ +# dependencies +/node_modules + +# next.js +/.next/ +/out/ + +# production +/build + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# Lock files +package-lock.json +yarn.lock + +# env files +.env + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts \ No newline at end of file diff --git a/frontend/CHANGELOG.md b/frontend/CHANGELOG.md new file mode 100644 index 00000000..7a1f9205 --- /dev/null +++ b/frontend/CHANGELOG.md @@ -0,0 +1,67 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.0.7] - 2025-05-15 + +### Added + +- Add Task agents +- Add file support for A2A protocol (Agent-to-Agent) endpoints +- Add entrypoint script for dynamic environment variable handling +- Add agent card URL input and copy functionality + +## [0.0.6] - 2025-05-13 + +### Added + +- Agent sharing functionality with third parties via API keys +- Dedicated shared-chat page for accessing shared agents +- Local storage mechanism to save recently used shared agents +- Public access to shared agents without full authentication + +### Changed + +- Add example environment file and update .gitignore +- Add clientId prop to agent-related components and improve agent data processing +- Refactor middleware to handle shared agent routes as public paths +- Update API interceptors to prevent forced logout on shared chat pages + +### security + +- Implement force logout functionality on 401 Unauthorized responses + +## [0.0.5] - 2025-05-13 + +### Changed + +- Update author information in multiple files + +## [0.0.4] - 2025-05-13 + +### Added +- Initial public release +- User-friendly interface for creating and managing AI agents +- Integration with multiple language models (e.g., GPT-4, Claude) +- Client management interface +- Visual 
configuration for MCP servers +- Custom tools management +- JWT authentication with email verification +- Agent 2 Agent (A2A) protocol support (Google's A2A spec) +- Workflow Agent with ReactFlow for visual workflow creation +- Secure API key management (encrypted storage) +- Agent organization with folders and categories +- Dashboard with agent overview, usage stats, and recent activities +- Agent editor for creating, editing, and configuring agents +- Workflow editor for building and visualizing agent flows +- API key manager for adding, encrypting, and rotating keys +- RESTful API and WebSocket backend integration +- Docker support for containerized deployment +- Complete documentation and contribution guidelines + +--- + +Older versions and future releases will be listed here. diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 00000000..1560e70b --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,63 @@ +# Build stage +FROM node:20.15.1-alpine AS builder + +WORKDIR /app + +# Define build arguments with default values +ARG NEXT_PUBLIC_API_URL=https://api-evoai.evoapicloud.com + +# Instalar pnpm globalmente +RUN npm install -g pnpm + +# Install dependencies first (caching) +COPY package.json pnpm-lock.yaml ./ +RUN pnpm install --frozen-lockfile + +# Instalar explicitamente o next-runtime-env +RUN pnpm add next-runtime-env + +# Copy source code +COPY . . 
+ +# Set environment variables from build arguments +ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL} + +RUN pnpm run build + +# Production stage +FROM node:20.15.1-alpine AS runner + +WORKDIR /app + +# Define build arguments again for the runner stage +ARG NEXT_PUBLIC_API_URL=https://api-evoai.evoapicloud.com + +# Instalar pnpm globalmente +RUN npm install -g pnpm + +# Install production dependencies only +COPY package.json pnpm-lock.yaml ./ +RUN pnpm install --prod --frozen-lockfile + +# Instalar explicitamente o next-runtime-env na produção +RUN pnpm add next-runtime-env + +# Copy built assets from builder +COPY --from=builder /app/.next ./.next +COPY --from=builder /app/public ./public +COPY --from=builder /app/next.config.mjs ./ + +# Set environment variables +ENV NODE_ENV=production +ENV PORT=3000 +ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL} + +# Script to replace environment variables at runtime - create it diretamente no container +COPY docker-entrypoint.sh ./ +RUN chmod +x ./docker-entrypoint.sh + +# Expose port +EXPOSE 3000 + +# Use entrypoint script to initialize environment variables before starting the app +ENTRYPOINT ["sh", "./docker-entrypoint.sh"] \ No newline at end of file diff --git a/frontend/LICENSE b/frontend/LICENSE new file mode 100644 index 00000000..727e6b0a --- /dev/null +++ b/frontend/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright 2025 Evolution API + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/frontend/README.md b/frontend/README.md new file mode 100644 index 00000000..baf04986 --- /dev/null +++ b/frontend/README.md @@ -0,0 +1,237 @@ +# Evo AI - AI Agents Platform (Frontend) + +Evo AI is an open-source platform for creating and managing AI agents, enabling integration with different AI models and services. + +## 🚀 Overview + +The Evo AI frontend platform enables: + +- User-friendly interface for creating and managing AI agents +- Integration with different language models +- Client management +- Visual configuration of MCP servers +- Custom tools management +- JWT authentication with email verification +- **Agent 2 Agent (A2A) Protocol Support**: Interface for interoperability between AI agents following Google's A2A specification +- **Workflow Agent with ReactFlow**: Visual interface for building complex agent workflows +- **Secure API Key Management**: Interface for encrypted storage of API keys +- **Agent Organization**: Folder structure for organizing agents by categories + +## 🧩 Agent Creation Interface + +The frontend offers intuitive interfaces for creating different types of agents: + +### 1. LLM Agent (Language Model) + +Interface for configuring agents based on models like GPT-4, Claude, etc. with tools, MCP servers, and sub-agents. + +### 2. A2A Agent (Agent-to-Agent) + +Interface for implementing Google's A2A protocol for agent interoperability. + +### 3. 
Sequential Agent + +Interface for executing sub-agents in a specific order. + +### 4. Parallel Agent + +Interface for executing multiple sub-agents simultaneously. + +### 5. Loop Agent + +Interface for executing sub-agents in a loop with a defined number of iterations. + +### 6. Workflow Agent + +Visual interface based on ReactFlow for creating complex workflows between agents. + +## 🛠️ Technologies + +- [Next.js](https://nextjs.org/) - React framework for production +- [React](https://reactjs.org/) - JavaScript library for building user interfaces +- [Tailwind CSS](https://tailwindcss.com/) - Utility-first CSS framework +- [Shadcn UI](https://ui.shadcn.com/) - UI component library +- [Radix UI](https://www.radix-ui.com/) - Unstyled, accessible components +- [TypeScript](https://www.typescriptlang.org/) - Typed JavaScript +- [React Query](https://tanstack.com/query/latest) - Data fetching and state management +- [Zustand](https://zustand-demo.pmnd.rs/) - Global state management +- [React Flow](https://reactflow.dev/) - Library for building node-based visual workflows +- [Axios](https://axios-http.com/) - HTTP client for API communication + +## 📋 Requirements + +- Node.js 18+ (LTS recommended) +- npm, yarn, or pnpm package manager +- Evo AI backend running + +## 🔧 Installation + +1. Clone the repository: + +```bash +git clone https://github.com/EvolutionAPI/evo-ai-frontend.git +cd evo-ai-frontend +``` + +2. Install dependencies: + +```bash +npm install +# or +yarn install +# or +pnpm install +``` + +3. 
Configure environment variables: + +```bash +cp .env.example .env +# Edit the .env file with your settings +``` + +## 🚀 Running the Project + +```bash +# Development mode +npm run dev +# or +yarn dev +# or +pnpm dev + +# Production build +npm run build +# or +yarn build +# or +pnpm build + +# Start production server +npm run start +# or +yarn start +# or +pnpm start +``` + +The project will be available at [http://localhost:3000](http://localhost:3000) + +## 🔐 Authentication + +The frontend implements JWT authentication integrated with the backend: + +- **User Registration**: Form for creating new accounts +- **Email Verification**: Process for verifying via email +- **Login**: Authentication of existing users +- **Password Recovery**: Complete password recovery flow +- **Secure Storage**: Tokens stored in HttpOnly cookies + +## 🖥️ Main Interface Features + +### Dashboard + +Main dashboard showing: +- Agent overview +- Usage statistics +- Recent activities +- Quick links for agent creation + +### Agent Editor + +Complete interface for: +- Creating new agents +- Editing existing agents +- Configuring instructions +- Selecting models +- Setting up API keys + +### Workflow Editor + +Visual editor based on ReactFlow for: +- Creating complex workflows +- Connecting different agents +- Defining conditionals and decision flows +- Visualizing data flow + +### API Key Manager + +Interface for: +- Adding new API keys +- Securely encrypting keys +- Managing existing keys +- Rotating and updating keys + +### Agent Organization + +System for: +- Creating folders and categories +- Organizing agents by type or use case +- Searching and filtering agents + +## 🔄 Backend Integration + +The frontend communicates with the backend through: + +- **RESTful API**: Endpoints for resource management +- **WebSockets**: Real-time communication for agent messages +- **Response Streaming**: Support for streaming model responses + +## 🐳 Docker Support + +The project includes Docker configuration 
for containerized deployment:
+
+```bash
+# Build the Docker image
+./docker_build.sh
+# or
+docker build -t nextjs-frontend .
+
+# Run the container
+docker run -p 3000:3000 nextjs-frontend
+```
+
+## 🐳 Docker Compose
+```bash
+# Copy the .env file
+cp .env.example .env
+
+# Build and deploy
+docker-compose up -d --build
+```
+
+## 🤝 Contributing
+
+We welcome contributions from the community! Here's how you can help:
+
+1. Fork the project
+2. Create a feature branch (`git checkout -b feature/AmazingFeature`)
+3. Make your changes and add tests if possible
+4. Run tests and make sure they pass
+5. Commit your changes following conventional commits format (`feat: add amazing feature`)
+6. Push to the branch (`git push origin feature/AmazingFeature`)
+7. Open a Pull Request
+
+Please read our [Contributing Guidelines](CONTRIBUTING.md) for more details.
+
+## 📄 License
+
+This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
+
+**Trademark Notice:** The name "Evo AI" and related branding are protected trademarks. Unauthorized use is prohibited. 
+ +## 👨‍💻 Development Commands + +- `npm run dev` - Start the development server +- `npm run build` - Build the application for production +- `npm run start` - Start the production server +- `npm run lint` - Run ESLint to check code quality +- `npm run format` - Format code with Prettier + +## 🙏 Acknowledgments + +- [Next.js](https://nextjs.org/) +- [React](https://reactjs.org/) +- [Tailwind CSS](https://tailwindcss.com/) +- [Shadcn UI](https://ui.shadcn.com/) +- [ReactFlow](https://reactflow.dev/) diff --git a/frontend/app/agents/AgentCard.tsx b/frontend/app/agents/AgentCard.tsx new file mode 100644 index 00000000..8a504d5c --- /dev/null +++ b/frontend/app/agents/AgentCard.tsx @@ -0,0 +1,506 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/AgentCard.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { Badge } from "@/components/ui/badge"; +import { Button } from "@/components/ui/button"; +import { Card, CardContent } from "@/components/ui/card"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { Folder } from "@/services/agentService"; +import { Agent, AgentType } from "@/types/agent"; +import { MCPServer } from "@/types/mcpServer"; +import { + ArrowRight, + Bot, + BookOpenCheck, + ChevronDown, + ChevronUp, + Code, + ExternalLink, + GitBranch, + MoveRight, + Pencil, + RefreshCw, + Settings, + Share2, + Trash2, + Workflow, + TextSelect, + Download, + FlaskConical, +} from "lucide-react"; +import { useState } from "react"; +import { useRouter } from "next/navigation"; +import { cn } from "@/lib/utils"; +import { exportAsJson } from "@/lib/utils"; + +interface AgentCardProps { + agent: Agent; + onEdit: (agent: Agent) => void; + onDelete: (agent: Agent) => void; + onMove: (agent: Agent) => void; + onShare?: (agent: Agent) => void; + onWorkflow?: (agentId: string) => void; + availableMCPs?: MCPServer[]; + getApiKeyNameById?: (id: string | undefined) => string | null; + getAgentNameById?: (id: string) => string; + folders?: Folder[]; + agents: Agent[]; +} + +export function AgentCard({ + agent, + onEdit, + onDelete, + onMove, + onShare, + onWorkflow, + availableMCPs = [], + getApiKeyNameById = () => null, + getAgentNameById = (id) => id, + folders = [], + agents, +}: AgentCardProps) { + const [expanded, setExpanded] = useState(false); + const router = useRouter(); + + const getAgentTypeInfo = (type: AgentType) => { + const types: Record< + string, + { + label: string; + icon: React.ElementType; + color: string; + bgColor: string; + badgeClass: string; + } + > = { + llm: { + label: "LLM Agent", + icon: Code, + color: "#00cc7d", + bgColor: "bg-green-500/10", + 
badgeClass: + "bg-green-900/30 text-green-400 border-green-600/30 hover:bg-green-900/40", + }, + a2a: { + label: "A2A Agent", + icon: ExternalLink, + color: "#6366f1", + bgColor: "bg-indigo-500/10", + badgeClass: + "bg-indigo-900/30 text-indigo-400 border-indigo-600/30 hover:bg-indigo-900/40", + }, + sequential: { + label: "Sequential Agent", + icon: ArrowRight, + color: "#f59e0b", + bgColor: "bg-yellow-500/10", + badgeClass: + "bg-yellow-900/30 text-yellow-400 border-yellow-600/30 hover:bg-yellow-900/40", + }, + parallel: { + label: "Parallel Agent", + icon: GitBranch, + color: "#8b5cf6", + bgColor: "bg-purple-500/10", + badgeClass: + "bg-purple-900/30 text-purple-400 border-purple-600/30 hover:bg-purple-900/40", + }, + loop: { + label: "Loop Agent", + icon: RefreshCw, + color: "#ec4899", + bgColor: "bg-pink-500/10", + badgeClass: + "bg-orange-900/30 text-orange-400 border-orange-600/30 hover:bg-orange-900/40", + }, + workflow: { + label: "Workflow Agent", + icon: Workflow, + color: "#3b82f6", + bgColor: "bg-blue-500/10", + badgeClass: + "bg-blue-900/30 text-blue-400 border-blue-700/40 hover:bg-blue-900/40", + }, + task: { + label: "Task Agent", + icon: BookOpenCheck, + color: "#ef4444", + bgColor: "bg-red-500/10", + badgeClass: + "bg-red-900/30 text-red-400 border-red-600/30 hover:bg-red-900/40", + }, + }; + + return ( + types[type] || { + label: type, + icon: Bot, + color: "#94a3b8", + bgColor: "bg-slate-500/10", + badgeClass: + "bg-slate-900/30 text-slate-400 border-slate-600/30 hover:bg-slate-900/40", + } + ); + }; + + const getAgentTypeIcon = (type: AgentType) => { + const typeInfo = getAgentTypeInfo(type); + const IconComponent = typeInfo.icon; + return ( + + ); + }; + + const getAgentTypeName = (type: AgentType) => { + return getAgentTypeInfo(type).label; + }; + + const getAgentTypeBgColor = (type: AgentType) => { + return getAgentTypeInfo(type).bgColor; + }; + + const getAgentTypeBadgeClass = (type: AgentType) => { + return 
getAgentTypeInfo(type).badgeClass; + }; + + const getFolderNameById = (id: string) => { + const folder = folders?.find((f) => f.id === id); + return folder?.name || id; + }; + + const getTotalTools = () => { + if (agent.type === "llm" && agent.config?.mcp_servers) { + return agent.config.mcp_servers.reduce( + (total, mcp) => total + (mcp.tools?.length || 0), + 0 + ); + } + return 0; + }; + + const getCreatedAtFormatted = () => { + return new Date(agent.created_at).toLocaleDateString(); + }; + + // Função para exportar o agente como JSON + const handleExportAgent = () => { + try { + exportAsJson( + agent, + `agent-${agent.name + .replace(/\s+/g, "-") + .toLowerCase()}-${agent.id.substring(0, 8)}`, + true, + agents + ); + } catch (error) { + console.error("Error exporting agent:", error); + } + }; + + // Função para testar o agente A2A no laboratório + const handleTestA2A = () => { + // Usar a URL do agent card como URL base para testes A2A + const agentUrl = agent.agent_card_url?.replace( + "/.well-known/agent.json", + "" + ); + + // Usar a API key diretamente do config do agente + const apiKey = agent.config?.api_key; + + // Construir a URL com parâmetros para o laboratório de testes + const params = new URLSearchParams(); + + if (agentUrl) { + params.set("agent_url", agentUrl); + } + + if (apiKey) { + params.set("api_key", apiKey); + } + + // Redirecionar para o laboratório de testes na aba "lab" + const testUrl = `/documentation?${params.toString()}#lab`; + + router.push(testUrl); + }; + + return ( + +
+
+ {getAgentTypeIcon(agent.type)} +

{agent.name}

+
+ + {getAgentTypeName(agent.type)} + +
+ + +
+

+ {agent.description && agent.description.length > 100 + ? `${agent.description.substring(0, 100)}...` + : agent.description} +

+
+ +
+
+ Model: + + {agent.type === "llm" ? agent.model : "N/A"} + +
+ +
+ + {expanded && ( +
+ {agent.folder_id && ( +
+ Folder: + + {getFolderNameById(agent.folder_id)} + +
+ )} + + {agent.type === "llm" && agent.api_key_id && ( +
+ API Key: + + {getApiKeyNameById(agent.api_key_id)} + +
+ )} + + {getTotalTools() > 0 && ( +
+ Tools: + {getTotalTools()} +
+ )} + + {agent.config?.sub_agents && agent.config.sub_agents.length > 0 && ( +
+ Sub-agents: + + {agent.config.sub_agents.length} + +
+ )} + + {agent.type === "workflow" && agent.config?.workflow && ( +
+ Elements: + + {agent.config.workflow.nodes?.length || 0} nodes,{" "} + {agent.config.workflow.edges?.length || 0} connections + +
+ )} + +
+ Created at: + {getCreatedAtFormatted()} +
+ +
+ ID: + {agent.id} +
+
+ )} + +
+ + + + + + + + Test A2A + + onEdit(agent)} + > + + Edit Agent + + onMove(agent)} + > + + Move Agent + + {onWorkflow && agent.type === "workflow" && ( + onWorkflow(agent.id)} + > + + Open Workflow + + )} + + + Export as JSON + + {onShare && ( + onShare(agent)} + > + + Share Agent + + )} + onDelete(agent)} + > + + Delete Agent + + + + + + + ); +} diff --git a/frontend/app/agents/AgentList.tsx b/frontend/app/agents/AgentList.tsx new file mode 100644 index 00000000..6eb55af8 --- /dev/null +++ b/frontend/app/agents/AgentList.tsx @@ -0,0 +1,130 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/AgentList.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { Agent } from "@/types/agent"; +import { MCPServer } from "@/types/mcpServer"; +import { AgentCard } from "./AgentCard"; +import { EmptyState } from "./EmptyState"; +import { ApiKey, Folder } from "@/services/agentService"; + +interface AgentListProps { + agents: Agent[]; + isLoading: boolean; + searchTerm: string; + selectedFolderId: string | null; + availableMCPs: MCPServer[]; + getApiKeyNameById: (id: string | undefined) => string | null; + getAgentNameById: (id: string) => string; + onEdit: (agent: Agent) => void; + onDelete: (agent: Agent) => void; + onMove: (agent: Agent) => void; + onShare?: (agent: Agent) => void; + onWorkflow?: (agentId: string) => void; + onClearSearch?: () => void; + onCreateAgent?: () => void; + apiKeys: ApiKey[]; + folders: Folder[]; +} + +export function AgentList({ + agents, + isLoading, + searchTerm, + selectedFolderId, + availableMCPs, + getApiKeyNameById, + getAgentNameById, + onEdit, + onDelete, + onMove, + onShare, + onWorkflow, + onClearSearch, + onCreateAgent, + apiKeys, + folders, +}: AgentListProps) { + if (isLoading) { + return ( +
+
+
+ ); + } + + if (agents.length === 0) { + if (searchTerm) { + return ( + + ); + } else if (selectedFolderId) { + return ( + + ); + } else { + return ( + + ); + } + } + + return ( +
+ {agents.map((agent) => ( + + ))} +
+ ); +} diff --git a/frontend/app/agents/AgentSidebar.tsx b/frontend/app/agents/AgentSidebar.tsx new file mode 100644 index 00000000..16358107 --- /dev/null +++ b/frontend/app/agents/AgentSidebar.tsx @@ -0,0 +1,186 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/AgentSidebar.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { Button } from "@/components/ui/button"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { + Folder, + FolderPlus, + Home, + X, + CircleEllipsis, + Edit, + Trash2, +} from "lucide-react"; + +interface AgentFolder { + id: string; + name: string; + description: string; +} + +interface AgentSidebarProps { + visible: boolean; + folders: AgentFolder[]; + selectedFolderId: string | null; + onSelectFolder: (id: string | null) => void; + onAddFolder: () => void; + onEditFolder: (folder: AgentFolder) => void; + onDeleteFolder: (folder: AgentFolder) => void; + onClose: () => void; +} + +export function AgentSidebar({ + visible, + folders, + selectedFolderId, + onSelectFolder, + onAddFolder, + onEditFolder, + onDeleteFolder, + onClose, +}: AgentSidebarProps) { + return ( + <> + {visible && ( + + )} + +
+
+

+ + Folders +

+
+ + +
+
+ +
+ + + {folders.map((folder) => ( +
+ + +
+ + + + + + { + e.stopPropagation(); + onEditFolder(folder); + }} + > + + Edit + + { + e.stopPropagation(); + onDeleteFolder(folder); + }} + > + + Delete + + + +
+
+ ))} +
+
+ + ); +} diff --git a/frontend/app/agents/AgentTypeSelector.tsx b/frontend/app/agents/AgentTypeSelector.tsx new file mode 100644 index 00000000..35db3def --- /dev/null +++ b/frontend/app/agents/AgentTypeSelector.tsx @@ -0,0 +1,96 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/AgentTypeSelector.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { AgentType } from "@/types/agent"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { + Code, + ExternalLink, + GitBranch, + RefreshCw, + Workflow, + Users, + BookOpenCheck, +} from "lucide-react"; + +interface AgentTypeSelectorProps { + value: AgentType; + onValueChange: (value: AgentType) => void; + className?: string; +} + +export const agentTypes = [ + { value: "llm", label: "LLM Agent", icon: Code }, + { value: "a2a", label: "A2A Agent", icon: ExternalLink }, + { value: "sequential", label: "Sequential Agent", icon: Workflow }, + { value: "parallel", label: "Parallel Agent", icon: GitBranch }, + { value: "loop", label: "Loop Agent", icon: RefreshCw }, + { value: "workflow", label: "Workflow Agent", icon: Workflow }, + { value: "task", label: "Task Agent", icon: BookOpenCheck }, +]; + +export function AgentTypeSelector({ + value, + onValueChange, + className = "", +}: AgentTypeSelectorProps) { + return ( + + ); +} diff --git a/frontend/app/agents/EmptyState.tsx b/frontend/app/agents/EmptyState.tsx new file mode 100644 index 00000000..b084c017 --- /dev/null +++ b/frontend/app/agents/EmptyState.tsx @@ -0,0 +1,107 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/EmptyState.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. 
│ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. │ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { Button } from "@/components/ui/button"; +import { Folder, Plus, Search, Server } from "lucide-react"; + +interface EmptyStateProps { + type: "no-agents" | "empty-folder" | "search-no-results"; + searchTerm?: string; + onAction?: () => void; + actionLabel?: string; +} + +export function EmptyState({ + type, + searchTerm = "", + onAction, + actionLabel = "Create Agent", +}: EmptyStateProps) { + const getIcon = () => { + switch (type) { + case "empty-folder": + return ; + case "search-no-results": + return ; + case "no-agents": + default: + return ; + } + }; + + const getTitle = () => { + switch (type) { + case "empty-folder": + return "Empty folder"; + case "search-no-results": + return "No agents found"; + case "no-agents": + default: + return "No agents found"; + } + }; + + const getMessage = () => { + switch (type) { + case "empty-folder": + return "This folder is empty. Add agents or create a new one."; + case "search-no-results": + return `We couldn't find any agents that match your search: "${searchTerm}"`; + case "no-agents": + default: + return "You don't have any agents configured. 
Create your first agent to start!"; + } + }; + + return ( +
+
+ {getIcon()} +
+

{getTitle()}

+

{getMessage()}

+ {onAction && ( + + )} +
+ ); +} diff --git a/frontend/app/agents/SearchInput.tsx b/frontend/app/agents/SearchInput.tsx new file mode 100644 index 00000000..f8e30739 --- /dev/null +++ b/frontend/app/agents/SearchInput.tsx @@ -0,0 +1,153 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/SearchInput.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { useState } from "react"; +import { Input } from "@/components/ui/input"; +import { Search, X, Filter } from "lucide-react"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Button } from "@/components/ui/button"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; + +interface SearchInputProps { + value: string; + onChange: (value: string) => void; + placeholder?: string; + className?: string; + selectedAgentType?: string | null; + onAgentTypeChange?: (type: string | null) => void; + agentTypes?: string[]; +} + +// Using "all" as a special value to represent no filter +const ANY_TYPE_VALUE = "all"; + +export function SearchInput({ + value, + onChange, + placeholder = "Search agents...", + className = "", + selectedAgentType = null, + onAgentTypeChange, + agentTypes = [], +}: SearchInputProps) { + const [isFilterOpen, setIsFilterOpen] = useState(false); + + const handleTypeChange = (value: string) => { + if (onAgentTypeChange) { + onAgentTypeChange(value === ANY_TYPE_VALUE ? null : value); + } + }; + + return ( +
+
+ + onChange(e.target.value)} + autoComplete="off" + className="pl-10 w-full bg-[#222] border-[#444] text-white focus:border-emerald-400 focus:ring-emerald-400/10" + /> + {value && ( + + )} +
+ + {agentTypes.length > 0 && onAgentTypeChange && ( + + + + + +
+
+ Filter by type +
+ + + {selectedAgentType && ( + + )} +
+
+
+ )} +
+ ); +} diff --git a/frontend/app/agents/config/A2AAgentConfig.tsx b/frontend/app/agents/config/A2AAgentConfig.tsx new file mode 100644 index 00000000..99f6a5dd --- /dev/null +++ b/frontend/app/agents/config/A2AAgentConfig.tsx @@ -0,0 +1,73 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/config/A2AAgentConfig.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. │ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; + +interface A2AAgentConfigProps { + values: { + agent_card_url?: string; + }; + onChange: (values: any) => void; +} + +export function A2AAgentConfig({ values, onChange }: A2AAgentConfigProps) { + return ( +
+
+ + + onChange({ + ...values, + agent_card_url: e.target.value, + }) + } + placeholder="https://example.com/.well-known/agent-card.json" + className="col-span-3 bg-[#222] border-[#444] text-white" + /> +
+
+

+ Provide the full URL for the JSON file of the Agent Card that describes + this agent. +

+

+ Agent Cards contain metadata, capabilities descriptions and supported + protocols. +

+
+
+ ); +} diff --git a/frontend/app/agents/config/LLMAgentConfig.tsx b/frontend/app/agents/config/LLMAgentConfig.tsx new file mode 100644 index 00000000..9c3d1449 --- /dev/null +++ b/frontend/app/agents/config/LLMAgentConfig.tsx @@ -0,0 +1,367 @@ +/* +┌──────────────────────────────────────────────────────────────────────────────┐ +│ @author: Davidson Gomes │ +│ @file: /app/agents/config/LLMAgentConfig.tsx │ +│ Developed by: Davidson Gomes │ +│ Creation date: May 13, 2025 │ +│ Contact: contato@evolution-api.com │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @copyright © Evolution API 2025. All rights reserved. │ +│ Licensed under the Apache License, Version 2.0 │ +│ │ +│ You may not use this file except in compliance with the License. │ +│ You may obtain a copy of the License at │ +│ │ +│ http://www.apache.org/licenses/LICENSE-2.0 │ +│ │ +│ Unless required by applicable law or agreed to in writing, software │ +│ distributed under the License is distributed on an "AS IS" BASIS, │ +│ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. │ +│ See the License for the specific language governing permissions and │ +│ limitations under the License. │ +├──────────────────────────────────────────────────────────────────────────────┤ +│ @important │ +│ For any future changes to the code in this file, it is recommended to │ +│ include, together with the modification, the information of the developer │ +│ who changed it and the date of modification. 
│ +└──────────────────────────────────────────────────────────────────────────────┘ +*/ +"use client"; + +import { Badge } from "@/components/ui/badge"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { Label } from "@/components/ui/label"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Textarea } from "@/components/ui/textarea"; +import { ApiKey } from "@/services/agentService"; +import { Plus, Maximize2, Save } from "lucide-react"; +import { useEffect, useState } from "react"; +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogFooter, +} from "@/components/ui/dialog"; + +interface ModelOption { + value: string; + label: string; + provider: string; +} + +interface LLMAgentConfigProps { + apiKeys: ApiKey[]; + availableModels: ModelOption[]; + values: { + model?: string; + api_key_id?: string; + instruction?: string; + role?: string; + goal?: string; + }; + onChange: (values: any) => void; + onOpenApiKeysDialog: () => void; +} + +export function LLMAgentConfig({ + apiKeys, + availableModels, + values, + onChange, + onOpenApiKeysDialog, +}: LLMAgentConfigProps) { + const [instructionText, setInstructionText] = useState(values.instruction || ""); + const [isInstructionModalOpen, setIsInstructionModalOpen] = useState(false); + const [expandedInstructionText, setExpandedInstructionText] = useState(""); + + useEffect(() => { + setInstructionText(values.instruction || ""); + }, [values.instruction]); + + const handleInstructionChange = (e: React.ChangeEvent) => { + const newValue = e.target.value; + setInstructionText(newValue); + + onChange({ + ...values, + instruction: newValue, + }); + }; + + const handleExpandInstruction = () => { + setExpandedInstructionText(instructionText); + setIsInstructionModalOpen(true); + }; + + const handleSaveExpandedInstruction = () => { + 
setInstructionText(expandedInstructionText); + onChange({ + ...values, + instruction: expandedInstructionText, + }); + setIsInstructionModalOpen(false); + }; + + return ( +
+
+ +
+ + onChange({ + ...values, + role: e.target.value, + }) + } + placeholder="Ex: Research Assistant, Customer Support, etc." + className="bg-[#222] border-[#444] text-white" + /> +
+ ℹ️ + Define the role or persona that the agent will assume +
+
+
+ +
+ +
+ + onChange({ + ...values, + goal: e.target.value, + }) + } + placeholder="Ex: Find and organize information, Assist customers with inquiries, etc." + className="bg-[#222] border-[#444] text-white" + /> +
+ ℹ️ + Define the main objective or purpose of this agent +
+
+
+ +
+ +
+
+ + + +
+ + {apiKeys.length === 0 && ( +
+ i + + You need to{" "} + {" "} + before creating an agent. + +
+ )} +
+
+ +
+ + { + const searchQuery = e.target.value.toLowerCase(); + const items = document.querySelectorAll('[data-model-item="true"]'); + items.forEach((item) => { + const text = item.textContent?.toLowerCase() || ''; + if (text.includes(searchQuery)) { + (item as HTMLElement).style.display = 'flex'; + } else { + (item as HTMLElement).style.display = 'none'; + } + }); + }} + /> +
+
+ {availableModels + .filter((model) => { + if (!values.api_key_id) return true; + + const selectedKey = apiKeys.find( + (key) => key.id === values.api_key_id + ); + + if (!selectedKey) return true; + + return model.provider === selectedKey.provider; + }) + .map((model) => ( + + {model.label} + + ))} +
+ + +
+ +
+ +
+
+