Merge branch 'release/0.0.2'
This commit is contained in:
commit
bafbd494ed
@ -39,6 +39,10 @@ SENDGRID_API_KEY="your-sendgrid-api-key"
|
||||
EMAIL_FROM="noreply@yourdomain.com"
|
||||
APP_URL="https://yourdomain.com"
|
||||
|
||||
LANGFUSE_PUBLIC_KEY="your-langfuse-public-key"
|
||||
LANGFUSE_SECRET_KEY="your-langfuse-secret-key"
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT="https://cloud.langfuse.com/api/public/otel"
|
||||
|
||||
# Server settings
|
||||
HOST="0.0.0.0"
|
||||
PORT=8000
|
||||
|
@ -49,6 +49,8 @@ dependencies = [
|
||||
"jwcrypto==1.5.6",
|
||||
"pyjwt[crypto]==2.9.0",
|
||||
"langgraph==0.4.1",
|
||||
"opentelemetry-sdk==1.33.0",
|
||||
"opentelemetry-exporter-otlp==1.33.0",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
|
@ -102,9 +102,8 @@ async def websocket_chat(
|
||||
memory_service=memory_service,
|
||||
db=db,
|
||||
):
|
||||
# Send each chunk as a JSON message
|
||||
await websocket.send_json(
|
||||
{"message": chunk, "turn_complete": False}
|
||||
{"message": json.loads(chunk), "turn_complete": False}
|
||||
)
|
||||
|
||||
# Send signal of complete turn
|
||||
|
@ -84,6 +84,11 @@ class Settings(BaseSettings):
|
||||
DEMO_PASSWORD: str = os.getenv("DEMO_PASSWORD", "demo123")
|
||||
DEMO_CLIENT_NAME: str = os.getenv("DEMO_CLIENT_NAME", "Demo Client")
|
||||
|
||||
# Langfuse / OpenTelemetry settings
|
||||
LANGFUSE_PUBLIC_KEY: str = os.getenv("LANGFUSE_PUBLIC_KEY", "")
|
||||
LANGFUSE_SECRET_KEY: str = os.getenv("LANGFUSE_SECRET_KEY", "")
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT: str = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "")
|
||||
|
||||
class Config:
|
||||
env_file = ".env"
|
||||
env_file_encoding = "utf-8"
|
||||
|
@ -7,6 +7,7 @@ from fastapi.staticfiles import StaticFiles
|
||||
from src.config.database import engine, Base
|
||||
from src.config.settings import settings
|
||||
from src.utils.logger import setup_logger
|
||||
from src.utils.otel import init_otel
|
||||
|
||||
# Necessary for other modules
|
||||
from src.services.service_providers import session_service # noqa: F401
|
||||
@ -85,6 +86,9 @@ app.include_router(session_router, prefix=API_PREFIX)
|
||||
app.include_router(agent_router, prefix=API_PREFIX)
|
||||
app.include_router(a2a_router, prefix=API_PREFIX)
|
||||
|
||||
# Initialize OpenTelemetry for Langfuse
|
||||
init_otel()
|
||||
|
||||
|
||||
@app.get("/")
|
||||
def read_root():
|
||||
|
@ -10,6 +10,9 @@ from src.services.agent_builder import AgentBuilder
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import Optional, AsyncGenerator
|
||||
import asyncio
|
||||
import json
|
||||
from src.utils.otel import get_tracer
|
||||
from opentelemetry import trace
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
@ -25,6 +28,16 @@ async def run_agent(
|
||||
session_id: Optional[str] = None,
|
||||
timeout: float = 60.0,
|
||||
):
|
||||
tracer = get_tracer()
|
||||
with tracer.start_as_current_span(
|
||||
"run_agent",
|
||||
attributes={
|
||||
"agent_id": agent_id,
|
||||
"external_id": external_id,
|
||||
"session_id": session_id or f"{external_id}_{agent_id}",
|
||||
"message": message,
|
||||
},
|
||||
):
|
||||
exit_stack = None
|
||||
try:
|
||||
logger.info(
|
||||
@ -109,7 +122,9 @@ async def run_agent(
|
||||
if last_response:
|
||||
await response_queue.put(last_response)
|
||||
else:
|
||||
await response_queue.put("Finished without specific response")
|
||||
await response_queue.put(
|
||||
"Finished without specific response"
|
||||
)
|
||||
|
||||
execution_completed.set()
|
||||
except Exception as e:
|
||||
@ -127,7 +142,9 @@ async def run_agent(
|
||||
p.cancel()
|
||||
|
||||
if not execution_completed.is_set():
|
||||
logger.warning(f"Agent execution timed out after {timeout} seconds")
|
||||
logger.warning(
|
||||
f"Agent execution timed out after {timeout} seconds"
|
||||
)
|
||||
await response_queue.put(
|
||||
"The response took too long and was interrupted."
|
||||
)
|
||||
@ -180,6 +197,17 @@ async def run_agent(
|
||||
# Do not raise the exception to not obscure the original error
|
||||
|
||||
|
||||
def convert_sets(obj):
    """Recursively convert JSON-incompatible containers for serialization.

    ``json.dumps`` cannot serialize ``set``/``frozenset``. The previous
    version only caught plain ``set`` at the top of each container level: a
    frozenset anywhere, or a set nested inside a tuple, slipped through
    untouched and raised ``TypeError`` at dump time. Sets, frozensets and
    tuples are all converted to lists (``json.dumps`` renders tuples as
    lists anyway, so the emitted JSON is unchanged), and elements are
    converted recursively.

    Args:
        obj: Any value; dicts, lists, tuples, sets and frozensets are
            walked recursively, everything else is returned unchanged.

    Returns:
        An equivalent structure containing only JSON-serializable
        container types.
    """
    if isinstance(obj, (set, frozenset)):
        return [convert_sets(i) for i in obj]
    if isinstance(obj, dict):
        return {k: convert_sets(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [convert_sets(i) for i in obj]
    return obj
|
||||
|
||||
|
||||
async def run_agent_stream(
|
||||
agent_id: str,
|
||||
external_id: str,
|
||||
@ -190,6 +218,18 @@ async def run_agent_stream(
|
||||
db: Session,
|
||||
session_id: Optional[str] = None,
|
||||
) -> AsyncGenerator[str, None]:
|
||||
tracer = get_tracer()
|
||||
span = tracer.start_span(
|
||||
"run_agent_stream",
|
||||
attributes={
|
||||
"agent_id": agent_id,
|
||||
"external_id": external_id,
|
||||
"session_id": session_id or f"{external_id}_{agent_id}",
|
||||
"message": message,
|
||||
},
|
||||
)
|
||||
try:
|
||||
with trace.use_span(span, end_on_exit=True):
|
||||
try:
|
||||
logger.info(
|
||||
f"Starting streaming execution of agent {agent_id} for external_id {external_id}"
|
||||
@ -246,11 +286,9 @@ async def run_agent_stream(
|
||||
)
|
||||
|
||||
async for event in events_async:
|
||||
if event.content and event.content.parts:
|
||||
text = event.content.parts[0].text
|
||||
if text:
|
||||
yield text
|
||||
await asyncio.sleep(0) # Allow other tasks to run
|
||||
event_dict = event.dict()
|
||||
event_dict = convert_sets(event_dict)
|
||||
yield json.dumps(event_dict)
|
||||
|
||||
completed_session = session_service.get_session(
|
||||
app_name=agent_id,
|
||||
@ -276,5 +314,9 @@ async def run_agent_stream(
|
||||
logger.error(f"Error processing request: {str(e)}")
|
||||
raise e
|
||||
except Exception as e:
|
||||
logger.error(f"Internal error processing request: {str(e)}", exc_info=True)
|
||||
logger.error(
|
||||
f"Internal error processing request: {str(e)}", exc_info=True
|
||||
)
|
||||
raise InternalServerError(str(e))
|
||||
finally:
|
||||
span.end()
|
||||
|
@ -304,7 +304,6 @@ class WorkflowAgent(BaseAgent):
|
||||
"session_id": session_id,
|
||||
}
|
||||
|
||||
# Function for the message-node
|
||||
async def message_node_function(
|
||||
state: State, node_id: str, node_data: Dict[str, Any]
|
||||
) -> AsyncGenerator[State, None]:
|
||||
@ -318,7 +317,6 @@ class WorkflowAgent(BaseAgent):
|
||||
session_id = state.get("session_id", "")
|
||||
conversation_history = state.get("conversation_history", [])
|
||||
|
||||
# Add the message as a new agent-authored Event
|
||||
new_event = Event(
|
||||
author="agent",
|
||||
content=Content(parts=[Part(text=message_content)]),
|
||||
@ -750,7 +748,7 @@ class WorkflowAgent(BaseAgent):
|
||||
content=Content(parts=[Part(text=user_message)]),
|
||||
)
|
||||
|
||||
# Se o histórico estiver vazio, adiciona a mensagem do usuário
|
||||
# If the conversation history is empty, add the user message
|
||||
conversation_history = ctx.session.events or []
|
||||
if not conversation_history or (len(conversation_history) == 0):
|
||||
conversation_history = [user_event]
|
||||
@ -768,16 +766,17 @@ class WorkflowAgent(BaseAgent):
|
||||
print("\n🚀 Starting workflow execution:")
|
||||
print(f"Initial content: {user_message[:100]}...")
|
||||
|
||||
# Execute the graph with a recursion limit to avoid infinite loops
|
||||
result = await graph.ainvoke(initial_state, {"recursion_limit": 20})
|
||||
sent_events = 0 # Count of events already sent
|
||||
|
||||
# 6. Process and return the result
|
||||
final_content = result.get("content", [])
|
||||
print(f"\n✅ FINAL RESULT: {final_content[:100]}...")
|
||||
|
||||
for content in final_content:
|
||||
if content.author != "user":
|
||||
yield content
|
||||
async for state in graph.astream(initial_state, {"recursion_limit": 20}):
|
||||
# The state can be a dict with the node name as a key
|
||||
for node_state in state.values():
|
||||
content = node_state.get("content", [])
|
||||
# Only send new events
|
||||
for event in content[sent_events:]:
|
||||
if event.author != "user":
|
||||
yield event
|
||||
sent_events = len(content)
|
||||
|
||||
# Execute sub-agents
|
||||
for sub_agent in self.sub_agents:
|
||||
|
41
src/utils/otel.py
Normal file
41
src/utils/otel.py
Normal file
@ -0,0 +1,41 @@
|
||||
import os
|
||||
import base64
|
||||
from src.config.settings import settings
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
||||
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
|
||||
|
||||
_otlp_initialized = False
|
||||
|
||||
|
||||
def init_otel():
    """Configure the global OpenTelemetry tracer provider for Langfuse.

    Idempotent: repeated calls are no-ops. Also a no-op unless all three
    settings (``LANGFUSE_PUBLIC_KEY``, ``LANGFUSE_SECRET_KEY``,
    ``OTEL_EXPORTER_OTLP_ENDPOINT``) are non-empty.

    Side effects: sets the ``OTEL_EXPORTER_OTLP_ENDPOINT`` and
    ``OTEL_EXPORTER_OTLP_HEADERS`` environment variables and installs the
    process-wide tracer provider.
    """
    global _otlp_initialized
    if _otlp_initialized:
        return

    required = (
        settings.LANGFUSE_PUBLIC_KEY,
        settings.LANGFUSE_SECRET_KEY,
        settings.OTEL_EXPORTER_OTLP_ENDPOINT,
    )
    if not all(required):
        return

    # Langfuse authenticates OTLP requests with HTTP Basic auth built from
    # the public/secret key pair; the exporter reads it from the environment.
    credentials = f"{settings.LANGFUSE_PUBLIC_KEY}:{settings.LANGFUSE_SECRET_KEY}"
    encoded_auth = base64.b64encode(credentials.encode()).decode()
    os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = settings.OTEL_EXPORTER_OTLP_ENDPOINT
    os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {encoded_auth}"

    tracer_provider = TracerProvider(
        resource=Resource.create({"service.name": "evo_ai_agent"})
    )
    tracer_provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(tracer_provider)
    _otlp_initialized = True
|
||||
|
||||
|
||||
def get_tracer(name: str = "evo_ai_agent"):
    """Return an OpenTelemetry tracer for the given instrumentation name."""
    tracer = trace.get_tracer(name)
    return tracer
|
Loading…
Reference in New Issue
Block a user