Add new GeminiCodeExecutor to code_executors/ folder.

PiperOrigin-RevId: 757895291
Google Team Member 2025-05-12 13:43:45 -07:00 committed by Copybara-Service
parent 5462862795
commit 993f9971bb
3 changed files with 60 additions and 1 deletion
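For orientation, a minimal sketch of how the new executor might be wired into an agent is shown below. The `LlmAgent` constructor and its `code_executor` parameter are assumed from existing ADK usage and are not part of this diff.

from google.adk.agents import LlmAgent
from google.adk.code_executors import GeminiCodeExecutor

# Hypothetical wiring: code execution is delegated to the Gemini built-in
# code execution tool through the new executor.
agent = LlmAgent(
    name="coder",
    model="gemini-2.0-flash",
    instruction="Write and run Python code to answer the user's question.",
    code_executor=GeminiCodeExecutor(),
)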

View File

@@ -16,6 +16,7 @@ import logging
from .base_code_executor import BaseCodeExecutor
from .code_executor_context import CodeExecutorContext
from .gemini_code_executor import GeminiCodeExecutor
from .unsafe_local_code_executor import UnsafeLocalCodeExecutor
logger = logging.getLogger(__name__)
@@ -23,6 +24,7 @@ logger = logging.getLogger(__name__)
__all__ = [
'BaseCodeExecutor',
'CodeExecutorContext',
'GeminiCodeExecutor',
'UnsafeLocalCodeExecutor',
]
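With this export, the class can be imported directly from the code_executors package, for example (assuming the package root is google.adk, which is not shown in this diff):

from google.adk.code_executors import GeminiCodeExecutor

executor = GeminiCodeExecutor()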

View File

@@ -0,0 +1,49 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.genai import types
from pydantic import Field
from typing_extensions import override

from ..agents.invocation_context import InvocationContext
from ..models import LlmRequest
from .base_code_executor import BaseCodeExecutor
from .code_execution_utils import CodeExecutionInput
from .code_execution_utils import CodeExecutionResult


class GeminiCodeExecutor(BaseCodeExecutor):
  """A code executor for Gemini 2.0+ models to execute code."""

  @override
  def execute_code(
      self,
      invocation_context: InvocationContext,
      code_execution_input: CodeExecutionInput,
  ) -> CodeExecutionResult:
    # Code execution happens server-side through the Gemini built-in code
    # execution tool, so there is nothing to run locally.
    pass

  def process_llm_request(self, llm_request: LlmRequest) -> None:
    """Pre-process the LLM request for Gemini 2.0+ models to use the code execution tool."""
    if llm_request.model and llm_request.model.startswith("gemini-2"):
      llm_request.config = llm_request.config or types.GenerateContentConfig()
      llm_request.config.tools = llm_request.config.tools or []
      llm_request.config.tools.append(
          types.Tool(code_execution=types.ToolCodeExecution())
      )
      return
    raise ValueError(
        "Gemini code execution tool is not supported for model"
        f" {llm_request.model}"
    )
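To illustrate the behavior of process_llm_request, a short usage sketch follows. It assumes LlmRequest can be constructed with only a model name, which is not shown in this diff.

from google.adk.code_executors import GeminiCodeExecutor
from google.adk.models import LlmRequest

executor = GeminiCodeExecutor()

# For a Gemini 2.x model, the built-in code execution tool is appended to the
# request config.
request = LlmRequest(model="gemini-2.0-flash")
executor.process_llm_request(request)
assert any(tool.code_execution is not None for tool in request.config.tools)

# Any other model name raises a ValueError instead.
try:
    executor.process_llm_request(LlmRequest(model="gemini-1.5-pro"))
except ValueError as err:
    print(err)  # "Gemini code execution tool is not supported for model gemini-1.5-pro"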

View File

@@ -22,7 +22,6 @@ import dataclasses
import os
import re
from typing import AsyncGenerator
from typing import Generator
from typing import Optional
from typing import TYPE_CHECKING
@@ -36,6 +35,7 @@ from ...code_executors.code_execution_utils import CodeExecutionResult
from ...code_executors.code_execution_utils import CodeExecutionUtils
from ...code_executors.code_execution_utils import File
from ...code_executors.code_executor_context import CodeExecutorContext
from ...code_executors.gemini_code_executor import GeminiCodeExecutor
from ...events.event import Event
from ...events.event_actions import EventActions
from ...models.llm_response import LlmResponse
@@ -174,6 +174,11 @@ async def _run_pre_processor(
  if not code_executor or not isinstance(code_executor, BaseCodeExecutor):
    return

  if isinstance(code_executor, GeminiCodeExecutor):
    code_executor.process_llm_request(llm_request)
    return

  if not code_executor.optimize_data_file:
    return
@@ -262,6 +267,9 @@ async def _run_post_processor(
  if not llm_response or not llm_response.content:
    return

  if isinstance(code_executor, GeminiCodeExecutor):
    return

  code_executor_context = CodeExecutorContext(invocation_context.session.state)
  # Skip if the error count exceeds the max retry attempts.
  if (