Changes for 0.1.0 release

hangfei 2025-04-09 04:24:34 +00:00
parent 9827820143
commit 363e10619a
25 changed files with 553 additions and 99 deletions

View File

@ -2,9 +2,19 @@
[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](LICENSE)
<img src="assets/agent-development-kit.png" alt="Agent Development Kit Logo" width="150">
**An open-source, code-first Python toolkit for building, evaluating, and deploying sophisticated AI agents with flexibility and control.**
<html>
<h1 align="center">
<img src="assets/agent-development-kit.png" width="256"/>
</h1>
<h3 align="center">
An open-source, code-first Python toolkit for building, evaluating, and deploying sophisticated AI agents with flexibility and control.
</h3>
<h3 align="center">
Important Links:
<a href="https://google.github.io/adk-docs/">Docs</a> &
<a href="https://github.com/google/adk-samples">Samples</a>.
</h3>
</html>
The Agent Development Kit (ADK) is designed for developers seeking fine-grained control and flexibility when building advanced AI agents that are tightly integrated with services in Google Cloud. It allows you to define agent behavior, orchestration, and tool use directly in code, enabling robust debugging, versioning, and deployment anywhere from your laptop to the cloud.
@ -25,7 +35,7 @@ The Agent Development Kit (ADK) is designed for developers seeking fine-grained
## 🚀 Installation
You can install the Agent Developer Kit using `pip`:
You can install the ADK using `pip`:
```bash
pip install google-adk
@ -42,7 +52,7 @@ from google.adk.tools import google_search
root_agent = Agent(
name="search_assistant",
model="gemini-1.5-flash-latest", # Or your preferred model like gemini-2.0-flash-001
model="gemini-2.0-flash-exp", # Or your preferred Gemini model
instruction="You are a helpful assistant. Answer user questions using Google Search when needed.",
description="An assistant that can search the web.",
tools=[google_search]
@ -68,19 +78,18 @@ Or launch the Web UI from the folder that contains the `my_agent` folder:
adk web
```
For a full step-by-step guide, check out the quickstart or sample agents.
For a full step-by-step guide, check out the [quickstart](https://google.github.io/adk-docs/get-started/quickstart/) or [sample agents](https://github.com/google/adk-samples).
## 📚 Resources
Explore the full documentation for detailed guides on building, evaluating, and deploying agents:
* **[Get Started](get-started/introduction.md)**
* **[Build Agents](build/agents.md)**
* **[Browse Sample Agents](learn/sample_agents/)**
* **[Evaluate Agents](evaluate/evaluate-agents.md)**
* **[Deploy Agents](deploy/overview.md)**
* **[API Reference](guides/reference.md)**
* **[Troubleshooting](guides/troubleshooting.md)**
* **[Get Started](https://google.github.io/adk-docs/get-started/)**
* **[Browse Sample Agents](https://github.com/google/adk-samples)**
* **[Evaluate Agents](https://google.github.io/adk-docs/guides/evaluate-agents/)**
* **[Deploy Agents](https://google.github.io/adk-docs/deploy/)**
* **[API Reference](https://google.github.io/adk-docs/api-reference/)**
* **[Troubleshooting](https://google.github.io/adk-docs/guides/troubleshooting/)**
## 🤝 Contributing

View File

@ -124,7 +124,7 @@ requires = ["flit_core >=3.8,<4"]
build-backend = "flit_core.buildapi"
[tool.flit.sdist]
include = ['src/**/*', 'README.md', 'pyproject.toml']
include = ['src/**/*', 'README.md', 'pyproject.toml', 'LICENSE']
exclude = ['src/**/*.sh']
[tool.flit.module]
@ -135,7 +135,6 @@ name = "google.adk"
force_single_line = true
force_sort_within_sections = true
honor_case_in_force_sorted_sections = true
known_third_party = ["agents", "google"]
order_by_type = false
sort_relative_in_force_sorted_sections = true
multi_line_output = 3

View File

@ -52,6 +52,10 @@ class RunConfig(BaseModel):
Whether to support CFC (Compositional Function Calling). Only applicable for
StreamingMode.SSE. If it's true, the LIVE API will be invoked, since only the
LIVE API supports CFC.
.. warning::
This feature is **experimental** and its API or behavior may change
in future releases.
"""
streaming_mode: StreamingMode = StreamingMode.NONE
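For context, here is a minimal sketch of opting into this experimental path. The `support_cfc` field name and the `StreamingMode` import location are inferred from this module rather than shown in the hunk, so treat them as assumptions.

```python
# Sketch only: enable the experimental CFC support described above.
# `support_cfc` and the import path are assumed, not confirmed by this diff.
from google.adk.agents.run_config import RunConfig, StreamingMode

run_config = RunConfig(
    streaming_mode=StreamingMode.SSE,  # CFC only applies to SSE streaming
    support_cfc=True,                  # experimental; API/behavior may change
)
```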

View File

@ -89,7 +89,7 @@ class AuthHandler:
client = OAuth2Session(
auth_credential.oauth2.client_id,
auth_credential.oauth2.client_secret,
scope=",".join(scopes),
scope=" ".join(scopes),
redirect_uri=auth_credential.oauth2.redirect_uri,
state=auth_credential.oauth2.state,
)
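The comma-to-space change above matters because OAuth 2.0 (RFC 6749, section 3.3) defines `scope` as a space-delimited list; a comma-joined value can be rejected or misparsed by authorization servers. A trivial illustration:

```python
# Space-delimited scope string, as authorization servers expect.
scopes = ["openid", "https://www.googleapis.com/auth/calendar.readonly"]
scope_param = " ".join(scopes)
# -> "openid https://www.googleapis.com/auth/calendar.readonly"
```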
@ -250,6 +250,7 @@ class AuthHandler:
or auth_scheme.flows.password
and auth_scheme.flows.password.scopes
)
scopes = list(scopes.keys())
client = OAuth2Session(
auth_credential.oauth2.client_id,
@ -257,7 +258,9 @@ class AuthHandler:
scope=" ".join(scopes),
redirect_uri=auth_credential.oauth2.redirect_uri,
)
uri, state = client.create_authorization_url(url=authorization_endpoint)
uri, state = client.create_authorization_url(
url=authorization_endpoint, access_type="offline", prompt="consent"
)
exchanged_auth_credential = auth_credential.model_copy(deep=True)
exchanged_auth_credential.oauth2.auth_uri = uri
exchanged_auth_credential.oauth2.state = state
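Assuming the `OAuth2Session` used here is authlib's requests client, the extra keyword arguments are appended to the authorization URL as query parameters; for Google's endpoint, `access_type=offline` together with `prompt=consent` asks for a refresh token and forces the consent screen. A standalone sketch with placeholder values:

```python
# Sketch with placeholder credentials and endpoint; not taken from the diff.
from authlib.integrations.requests_client import OAuth2Session

client = OAuth2Session(
    "client-id-placeholder",
    "client-secret-placeholder",
    scope="openid email",
    redirect_uri="https://example.com/oauth/callback",
)
uri, state = client.create_authorization_url(
    url="https://accounts.google.com/o/oauth2/v2/auth",
    access_type="offline",  # ask Google for a refresh token
    prompt="consent",       # always show the consent screen
)
# `uri` now ends with ...&access_type=offline&prompt=consent
```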

View File

@ -48,26 +48,28 @@ class _AuthLlmRequestProcessor(BaseLlmRequestProcessor):
events = invocation_context.session.events
if not events:
return
request_euc_function_call_response_event = events[-1]
responses = (
request_euc_function_call_response_event.get_function_responses()
)
if not responses:
return
request_euc_function_call_ids = set()
for function_call_response in responses:
if function_call_response.name != REQUEST_EUC_FUNCTION_CALL_NAME:
for k in range(len(events) - 1, -1, -1):
event = events[k]
# look for the most recent event authored by the user
if not event.author or event.author != 'user':
continue
responses = event.get_function_responses()
if not responses:
return
# found the function call response for the system long running request euc
# function call
request_euc_function_call_ids.add(function_call_response.id)
auth_config = AuthConfig.model_validate(function_call_response.response)
AuthHandler(auth_config=auth_config).parse_and_store_auth_response(
state=invocation_context.session.state
)
for function_call_response in responses:
if function_call_response.name != REQUEST_EUC_FUNCTION_CALL_NAME:
continue
# found the function call response for the system long running request euc
# function call
request_euc_function_call_ids.add(function_call_response.id)
auth_config = AuthConfig.model_validate(function_call_response.response)
AuthHandler(auth_config=auth_config).parse_and_store_auth_response(
state=invocation_context.session.state
)
break
if not request_euc_function_call_ids:
return
@ -89,6 +91,7 @@ class _AuthLlmRequestProcessor(BaseLlmRequestProcessor):
tools_to_resume.add(args.function_call_id)
if not tools_to_resume:
continue
# found the system long running request euc function call
# looking for the original function call that requests euc
for j in range(i - 1, -1, -1):

View File

@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
from typing import Union
import graphviz
@ -21,7 +24,15 @@ from ..agents.llm_agent import LlmAgent
from ..tools.agent_tool import AgentTool
from ..tools.base_tool import BaseTool
from ..tools.function_tool import FunctionTool
from ..tools.retrieval.base_retrieval_tool import BaseRetrievalTool
logger = logging.getLogger(__name__)
try:
from ..tools.retrieval.base_retrieval_tool import BaseRetrievalTool
except ModuleNotFoundError:
retrieval_tool_module_loaded = False
else:
retrieval_tool_module_loaded = True
def build_graph(graph, agent: BaseAgent, highlight_pairs):
@ -38,9 +49,12 @@ def build_graph(graph, agent: BaseAgent, highlight_pairs):
raise ValueError(f'Unsupported tool type: {tool_or_agent}')
def get_node_caption(tool_or_agent: Union[BaseAgent, BaseTool]):
if isinstance(tool_or_agent, BaseAgent):
return '🤖 ' + tool_or_agent.name
elif isinstance(tool_or_agent, BaseRetrievalTool):
elif retrieval_tool_module_loaded and isinstance(
tool_or_agent, BaseRetrievalTool
):
return '🔎 ' + tool_or_agent.name
elif isinstance(tool_or_agent, FunctionTool):
return '🔧 ' + tool_or_agent.name
@ -49,19 +63,31 @@ def build_graph(graph, agent: BaseAgent, highlight_pairs):
elif isinstance(tool_or_agent, BaseTool):
return '🔧 ' + tool_or_agent.name
else:
raise ValueError(f'Unsupported tool type: {type(tool)}')
logger.warning(
'Unsupported tool, type: %s, obj: %s',
type(tool_or_agent),
tool_or_agent,
)
return f'❓ Unsupported tool type: {type(tool_or_agent)}'
def get_node_shape(tool_or_agent: Union[BaseAgent, BaseTool]):
if isinstance(tool_or_agent, BaseAgent):
return 'ellipse'
elif isinstance(tool_or_agent, BaseRetrievalTool):
elif retrieval_tool_module_loaded and isinstance(
tool_or_agent, BaseRetrievalTool
):
return 'cylinder'
elif isinstance(tool_or_agent, FunctionTool):
return 'box'
elif isinstance(tool_or_agent, BaseTool):
return 'box'
else:
raise ValueError(f'Unsupported tool type: {type(tool_or_agent)}')
logger.warning(
'Unsupported tool, type: %s, obj: %s',
type(tool_or_agent),
tool_or_agent,
)
return 'cylinder'
def draw_node(tool_or_agent: Union[BaseAgent, BaseTool]):
name = get_node_name(tool_or_agent)

View File

@ -0,0 +1,17 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_756_3354)">
<path fill-rule="evenodd" clip-rule="evenodd" d="M8.69139 10.1458C8.89799 10.3937 8.8645 10.7622 8.61657 10.9688L7.07351 12.2547L8.61657 13.5406C8.8645 13.7472 8.89799 14.1157 8.69139 14.3636C8.48478 14.6115 8.11631 14.645 7.86838 14.4384L5.82029 12.7317C5.52243 12.4834 5.52242 12.026 5.82029 11.7777L7.86838 10.071C8.11631 9.86438 8.48478 9.89788 8.69139 10.1458Z" fill="#EA4335"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M11.4459 10.1458C11.2393 10.3937 11.2728 10.7622 11.5207 10.9688L13.0638 12.2547L11.5207 13.5406C11.2728 13.7472 11.2393 14.1157 11.4459 14.3636C11.6525 14.6115 12.021 14.645 12.2689 14.4384L14.317 12.7317C14.6149 12.4834 14.6149 12.026 14.317 11.7777L12.2689 10.071C12.021 9.86438 11.6525 9.89788 11.4459 10.1458Z" fill="#EA4335"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M5.94165 2.19288C4.44903 2.19288 3.23902 3.40289 3.23902 4.89551C3.23902 6.38813 4.44903 7.59814 5.94165 7.59814H8.60776V8.76685H5.94165C3.80357 8.76685 2.07031 7.03359 2.07031 4.89551C2.07031 2.75743 3.80357 1.02417 5.94165 1.02417H9.73995C10.0627 1.02417 10.3243 1.28579 10.3243 1.60852C10.3243 1.93125 10.0627 2.19288 9.73995 2.19288H5.94165Z" fill="#4285F4"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M10.6895 2.19288C12.1821 2.19288 13.3922 3.40289 13.3922 4.89551C13.3922 6.38813 12.1821 7.59814 10.6895 7.59814H6.89123C6.5685 7.59814 6.30687 7.85977 6.30687 8.1825C6.30687 8.50523 6.5685 8.76685 6.89123 8.76685H10.6895C12.8276 8.76685 14.5609 7.03359 14.5609 4.89551C14.5609 2.75743 12.8276 1.02417 10.6895 1.02417H6.89123C6.5685 1.02417 6.30687 1.28579 6.30687 1.60852C6.30687 1.93125 6.5685 2.19288 6.89123 2.19288H10.6895Z" fill="#34A853"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M3.23902 10.739H4.18859C4.51132 10.739 4.77295 10.4774 4.77295 10.1547C4.77295 9.83196 4.51132 9.57033 4.18859 9.57033H3.01989C2.49545 9.57033 2.07031 9.99547 2.07031 10.5199V14.026C2.07031 14.5505 2.49545 14.9756 3.01989 14.9756H4.18859C4.51132 14.9756 4.77295 14.714 4.77295 14.3912C4.77295 14.0685 4.51132 13.8069 4.18859 13.8069H3.23902V10.739Z" fill="#FBBC04"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M10.9452 8.1825C10.9452 7.85977 10.6836 7.59814 10.3608 7.59814H6.89123C6.5685 7.59814 6.30687 7.85977 6.30687 8.1825C6.30687 8.50523 6.5685 8.76685 6.89123 8.76685H10.3608C10.6836 8.76685 10.9452 8.50523 10.9452 8.1825Z" fill="#4285F4"/>
<path d="M6.74514 4.89551C6.74514 5.25858 6.45081 5.55291 6.08774 5.55291C5.72467 5.55291 5.43034 5.25858 5.43034 4.89551C5.43034 4.53244 5.72467 4.23811 6.08774 4.23811C6.45081 4.23811 6.74514 4.53244 6.74514 4.89551Z" fill="#4285F4"/>
<path d="M11.2739 4.89551C11.2739 5.25858 10.9795 5.55291 10.6165 5.55291C10.2534 5.55291 9.95908 5.25858 9.95908 4.89551C9.95908 4.53244 10.2534 4.23811 10.6165 4.23811C10.9795 4.23811 11.2739 4.53244 11.2739 4.89551Z" fill="#4285F4"/>
</g>
<defs>
<clipPath id="clip0_756_3354">
<rect width="12.6294" height="14" fill="white" transform="translate(2 1)"/>
</clipPath>
</defs>
</svg>


View File

@ -0,0 +1,51 @@
/**
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class AudioProcessor extends AudioWorkletProcessor {
constructor() {
super();
this.targetSampleRate = 22000; // Change to your desired rate
this.originalSampleRate = sampleRate; // Browser's sample rate
this.resampleRatio = this.originalSampleRate / this.targetSampleRate;
}
process(inputs, outputs, parameters) {
const input = inputs[0];
if (input.length > 0) {
let audioData = input[0]; // Get first channel's data
if (this.resampleRatio !== 1) {
audioData = this.resample(audioData);
}
this.port.postMessage(audioData);
}
return true; // Keep processor alive
}
resample(audioData) {
const newLength = Math.round(audioData.length / this.resampleRatio);
const resampled = new Float32Array(newLength);
for (let i = 0; i < newLength; i++) {
const srcIndex = Math.floor(i * this.resampleRatio);
resampled[i] = audioData[srcIndex]; // Nearest neighbor resampling
}
return resampled;
}
}
registerProcessor('audio-processor', AudioProcessor);

View File

@ -0,0 +1,3 @@
{
"backendUrl": ""
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,17 @@
/**
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
html{color-scheme:dark}html{--mat-sys-background: light-dark(#fcf9f8, #131314);--mat-sys-error: light-dark(#ba1a1a, #ffb4ab);--mat-sys-error-container: light-dark(#ffdad6, #93000a);--mat-sys-inverse-on-surface: light-dark(#f3f0f0, #313030);--mat-sys-inverse-primary: light-dark(#c1c7cd, #595f65);--mat-sys-inverse-surface: light-dark(#313030, #e5e2e2);--mat-sys-on-background: light-dark(#1c1b1c, #e5e2e2);--mat-sys-on-error: light-dark(#ffffff, #690005);--mat-sys-on-error-container: light-dark(#410002, #ffdad6);--mat-sys-on-primary: light-dark(#ffffff, #2b3136);--mat-sys-on-primary-container: light-dark(#161c21, #dde3e9);--mat-sys-on-primary-fixed: light-dark(#161c21, #161c21);--mat-sys-on-primary-fixed-variant: light-dark(#41474d, #41474d);--mat-sys-on-secondary: light-dark(#ffffff, #003061);--mat-sys-on-secondary-container: light-dark(#001b3c, #d5e3ff);--mat-sys-on-secondary-fixed: light-dark(#001b3c, #001b3c);--mat-sys-on-secondary-fixed-variant: light-dark(#0f4784, #0f4784);--mat-sys-on-surface: light-dark(#1c1b1c, #e5e2e2);--mat-sys-on-surface-variant: light-dark(#44474a, #e1e2e6);--mat-sys-on-tertiary: light-dark(#ffffff, #2b3136);--mat-sys-on-tertiary-container: light-dark(#161c21, #dde3e9);--mat-sys-on-tertiary-fixed: light-dark(#161c21, #161c21);--mat-sys-on-tertiary-fixed-variant: light-dark(#41474d, #41474d);--mat-sys-outline: light-dark(#74777b, #8e9194);--mat-sys-outline-variant: light-dark(#c4c7ca, #44474a);--mat-sys-primary: light-dark(#595f65, #c1c7cd);--mat-sys-primary-container: light-dark(#dde3e9, #41474d);--mat-sys-primary-fixed: light-dark(#dde3e9, #dde3e9);--mat-sys-primary-fixed-dim: light-dark(#c1c7cd, #c1c7cd);--mat-sys-scrim: light-dark(#000000, #000000);--mat-sys-secondary: light-dark(#305f9d, #a7c8ff);--mat-sys-secondary-container: light-dark(#d5e3ff, #0f4784);--mat-sys-secondary-fixed: light-dark(#d5e3ff, #d5e3ff);--mat-sys-secondary-fixed-dim: light-dark(#a7c8ff, #a7c8ff);--mat-sys-shadow: light-dark(#000000, #000000);--mat-sys-surface: light-dark(#fcf9f8, #131314);--mat-sys-surface-bright: light-dark(#fcf9f8, #393939);--mat-sys-surface-container: light-dark(#f0eded, #201f20);--mat-sys-surface-container-high: light-dark(#eae7e7, #2a2a2a);--mat-sys-surface-container-highest: light-dark(#e5e2e2, #393939);--mat-sys-surface-container-low: light-dark(#f6f3f3, #1c1b1c);--mat-sys-surface-container-lowest: light-dark(#ffffff, #0e0e0e);--mat-sys-surface-dim: light-dark(#dcd9d9, #131314);--mat-sys-surface-tint: light-dark(#595f65, #c1c7cd);--mat-sys-surface-variant: light-dark(#e1e2e6, #44474a);--mat-sys-tertiary: light-dark(#595f65, #c1c7cd);--mat-sys-tertiary-container: light-dark(#dde3e9, #41474d);--mat-sys-tertiary-fixed: light-dark(#dde3e9, #dde3e9);--mat-sys-tertiary-fixed-dim: light-dark(#c1c7cd, #c1c7cd);--mat-sys-neutral-variant20: #2d3134;--mat-sys-neutral10: #1c1b1c}html{--mat-sys-level0: 0px 0px 0px 0px rgba(0, 0, 0, .2), 0px 0px 0px 0px rgba(0, 0, 0, .14), 0px 0px 0px 0px rgba(0, 0, 0, .12)}html{--mat-sys-level1: 0px 2px 1px -1px rgba(0, 0, 0, .2), 0px 1px 1px 0px rgba(0, 0, 0, .14), 0px 1px 3px 0px rgba(0, 0, 0, .12)}html{--mat-sys-level2: 0px 3px 3px -2px rgba(0, 0, 0, .2), 0px 3px 4px 0px rgba(0, 0, 0, .14), 0px 1px 8px 0px rgba(0, 0, 0, .12)}html{--mat-sys-level3: 0px 3px 5px -1px rgba(0, 0, 0, .2), 0px 6px 10px 0px rgba(0, 0, 0, .14), 0px 1px 18px 0px rgba(0, 0, 0, .12)}html{--mat-sys-level4: 0px 5px 5px -3px rgba(0, 0, 0, .2), 0px 8px 10px 1px rgba(0, 0, 0, .14), 0px 3px 14px 2px rgba(0, 0, 0, .12)}html{--mat-sys-level5: 0px 7px 8px -4px rgba(0, 0, 0, 
.2), 0px 12px 17px 2px rgba(0, 0, 0, .14), 0px 5px 22px 4px rgba(0, 0, 0, .12)}html{--mat-sys-corner-extra-large: 28px;--mat-sys-corner-extra-large-top: 28px 28px 0 0;--mat-sys-corner-extra-small: 4px;--mat-sys-corner-extra-small-top: 4px 4px 0 0;--mat-sys-corner-full: 9999px;--mat-sys-corner-large: 16px;--mat-sys-corner-large-end: 0 16px 16px 0;--mat-sys-corner-large-start: 16px 0 0 16px;--mat-sys-corner-large-top: 16px 16px 0 0;--mat-sys-corner-medium: 12px;--mat-sys-corner-none: 0;--mat-sys-corner-small: 8px}html{--mat-sys-dragged-state-layer-opacity: .16;--mat-sys-focus-state-layer-opacity: .12;--mat-sys-hover-state-layer-opacity: .08;--mat-sys-pressed-state-layer-opacity: .12}html{font-family:Google Sans,Helvetica Neue,sans-serif!important}body{height:100vh;margin:0}markdown p{margin-block-start:.5em;margin-block-end:.5em}:root{--mat-sys-primary: black;--mdc-checkbox-selected-icon-color: white;--mat-sys-background: #131314;--mat-tab-header-active-label-text-color: #8AB4F8;--mat-tab-header-active-hover-label-text-color: #8AB4F8;--mat-tab-header-active-focus-label-text-color: #8AB4F8;--mat-tab-header-label-text-weight: 500;--mdc-text-button-label-text-color: #89b4f8}:root{--mdc-dialog-container-color: #2b2b2f}:root{--mdc-dialog-subhead-color: white}:root{--mdc-circular-progress-active-indicator-color: #a8c7fa}:root{--mdc-circular-progress-size: 80}

View File

@ -168,14 +168,14 @@ async def run_cli(
else:
session_id = input('Session ID to save: ')
session_path = f'{agent_module_path}/{session_id}.session.json'
# Fetch the session again to get all the details.
session = session_service.get_session(
app_name=session.app_name,
user_id=session.user_id,
session_id=session.id,
)
with open(session_path, 'w') as f:
f.write(session.model_dump_json(indent=2, exclude_none=True))
# TODO: Save from opentelemetry.
# logs_path = session_path.replace('.session.json', '.logs.json')
# with open(logs_path, 'w') as f:
# f.write(
# session.model_dump_json(
# indent=2, exclude_none=True, include='event_logs'
# )
# )
print('Session saved to', session_path)

View File

@ -36,10 +36,8 @@ USER myuser
ENV PATH="/home/myuser/.local/bin:$PATH"
ENV GOOGLE_GENAI_USE_VERTEXAI=1
# TODO: use passed-in value
ENV GOOGLE_CLOUD_PROJECT={gcp_project_id}
ENV GOOGLE_CLOUD_LOCATION={gcp_region}
ENV ADK_TRACE_TO_CLOUD={with_cloud_trace}
# Set up environment variables - End
@ -56,7 +54,7 @@ COPY "agents/{app_name}/" "/app/agents/{app_name}/"
EXPOSE {port}
CMD adk {command} --port={port} "/app/agents"
CMD adk {command} --port={port} {trace_to_cloud_option} "/app/agents"
"""
@ -144,7 +142,7 @@ def to_cloud_run(
port=port,
command='web' if with_ui else 'api_server',
install_agent_deps=install_agent_deps,
with_cloud_trace='1' if with_cloud_trace else '0',
trace_to_cloud_option='--trace_to_cloud' if with_cloud_trace else '',
)
dockerfile_path = os.path.join(temp_folder, 'Dockerfile')
os.makedirs(temp_folder, exist_ok=True)

View File

@ -13,6 +13,7 @@
# limitations under the License.
import asyncio
from contextlib import asynccontextmanager
from datetime import datetime
import logging
import os
@ -20,6 +21,7 @@ import tempfile
from typing import Optional
import click
from fastapi import FastAPI
import uvicorn
from . import cli_deploy
@ -98,7 +100,7 @@ def cli_run(agent: str, save_session: bool):
default=False,
help="Optional. Whether to print detailed results on console or not.",
)
def eval_command(
def cli_eval(
agent_module_file_path: str,
eval_set_file_path: tuple[str],
config_file_path: str,
@ -230,6 +232,13 @@ def eval_command(
" This is useful for local debugging."
),
)
@click.option(
"--trace_to_cloud",
is_flag=True,
show_default=True,
default=False,
help="Optional. Whether to enable cloud trace for telemetry.",
)
@click.argument(
"agents_dir",
type=click.Path(
@ -237,15 +246,16 @@ def eval_command(
),
default=os.getcwd(),
)
def web(
def cli_web(
agents_dir: str,
log_to_tmp: bool,
session_db_url: str = "",
log_level: str = "INFO",
allow_origins: Optional[list[str]] = None,
port: int = 8000,
trace_to_cloud: bool = False,
):
"""Start a FastAPI server with web UI for a certain agent.
"""Start a FastAPI server with Web UI for agents.
AGENTS_DIR: The directory of agents, where each sub-directory is a single
agent, containing at least `__init__.py` and `agent.py` files.
@ -261,17 +271,43 @@ def web(
logging.getLogger().setLevel(log_level)
@asynccontextmanager
async def _lifespan(app: FastAPI):
click.secho(
f"""\
+-----------------------------------------------------------------------------+
| ADK Web Server started |
| |
| For local testing, access at http://localhost:{port}.{" "*(29 - len(str(port)))}|
+-----------------------------------------------------------------------------+
""",
fg="green",
)
yield # Startup is done, now app is running
click.secho(
"""\
+-----------------------------------------------------------------------------+
| ADK Web Server shutting down... |
+-----------------------------------------------------------------------------+
""",
fg="green",
)
app = get_fast_api_app(
agent_dir=agents_dir,
session_db_url=session_db_url,
allow_origins=allow_origins,
web=True,
trace_to_cloud=trace_to_cloud,
lifespan=_lifespan,
)
config = uvicorn.Config(
get_fast_api_app(
agent_dir=agents_dir,
session_db_url=session_db_url,
allow_origins=allow_origins,
web=True,
),
app,
host="0.0.0.0",
port=port,
reload=True,
)
server = uvicorn.Server(config)
server.run()
@ -317,6 +353,13 @@ def web(
" This is useful for local debugging."
),
)
@click.option(
"--trace_to_cloud",
is_flag=True,
show_default=True,
default=False,
help="Optional. Whether to enable cloud trace for telemetry.",
)
# The directory of agents, where each sub-directory is a single agent.
# By default, it is the current working directory
@click.argument(
@ -333,8 +376,9 @@ def cli_api_server(
log_level: str = "INFO",
allow_origins: Optional[list[str]] = None,
port: int = 8000,
trace_to_cloud: bool = False,
):
"""Start an api server for a certain agent.
"""Start a FastAPI server for agents.
AGENTS_DIR: The directory of agents, where each sub-directory is a single
agent, containing at least `__init__.py` and `agent.py` files.
@ -356,6 +400,7 @@ def cli_api_server(
session_db_url=session_db_url,
allow_origins=allow_origins,
web=False,
trace_to_cloud=trace_to_cloud,
),
host="0.0.0.0",
port=port,
@ -444,7 +489,7 @@ def cli_api_server(
exists=True, dir_okay=True, file_okay=False, resolve_path=True
),
)
def deploy_to_cloud_run(
def cli_deploy_cloud_run(
agent: str,
project: Optional[str],
region: Optional[str],
@ -455,7 +500,7 @@ def deploy_to_cloud_run(
with_cloud_trace: bool,
with_ui: bool,
):
"""Deploys agent to Cloud Run.
"""Deploys an agent to Cloud Run.
AGENT: The path to the agent source code folder.

View File

@ -31,7 +31,6 @@ import click
from fastapi import FastAPI
from fastapi import HTTPException
from fastapi import Query
from fastapi import Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.responses import RedirectResponse
@ -48,6 +47,7 @@ from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace import TracerProvider
from pydantic import BaseModel
from pydantic import ValidationError
from starlette.types import Lifespan
from ..agents import RunConfig
from ..agents.live_request_queue import LiveRequest
@ -83,7 +83,11 @@ class ApiServerSpanExporter(export.SpanExporter):
self, spans: typing.Sequence[ReadableSpan]
) -> export.SpanExportResult:
for span in spans:
if span.name == "call_llm" or span.name == "send_data":
if (
span.name == "call_llm"
or span.name == "send_data"
or span.name.startswith("tool_response")
):
attributes = dict(span.attributes)
attributes["trace_id"] = span.get_span_context().trace_id
attributes["span_id"] = span.get_span_context().span_id
@ -128,6 +132,8 @@ def get_fast_api_app(
session_db_url: str = "",
allow_origins: Optional[list[str]] = None,
web: bool,
trace_to_cloud: bool = False,
lifespan: Optional[Lifespan[FastAPI]] = None,
) -> FastAPI:
# InMemory tracing dict.
trace_dict: dict[str, Any] = {}
@ -137,18 +143,26 @@ def get_fast_api_app(
provider.add_span_processor(
export.SimpleSpanProcessor(ApiServerSpanExporter(trace_dict))
)
if os.environ.get("ADK_TRACE_TO_CLOUD", "0") == "1":
processor = export.BatchSpanProcessor(
CloudTraceSpanExporter(
project_id=os.environ.get("GOOGLE_CLOUD_PROJECT", "")
)
)
provider.add_span_processor(processor)
envs.load_dotenv()
enable_cloud_tracing = trace_to_cloud or os.environ.get(
"ADK_TRACE_TO_CLOUD", "0"
).lower() in ["1", "true"]
if enable_cloud_tracing:
if project_id := os.environ.get("GOOGLE_CLOUD_PROJECT", None):
processor = export.BatchSpanProcessor(
CloudTraceSpanExporter(project_id=project_id)
)
provider.add_span_processor(processor)
else:
logging.warning(
"GOOGLE_CLOUD_PROJECT environment variable is not set. Tracing will"
" not be enabled."
)
trace.set_tracer_provider(provider)
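Both entry points shown in the logic above can turn the exporter on; a small sketch, with a placeholder project id:

```python
# Sketch of enabling Cloud Trace export for get_fast_api_app.
import os

os.environ["GOOGLE_CLOUD_PROJECT"] = "my-project"  # placeholder; required either way
os.environ["ADK_TRACE_TO_CLOUD"] = "true"          # env-var path ("1" also works)

# ...or pass the new argument directly (agent_dir value is a placeholder):
# app = get_fast_api_app(agent_dir="agents", web=True, trace_to_cloud=True)
```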
# Run the FastAPI server.
app = FastAPI()
app = FastAPI(lifespan=lifespan)
if allow_origins:
app.add_middleware(
@ -478,6 +492,7 @@ def get_fast_api_app(
artifact_name: str,
version: Optional[int] = Query(None),
) -> Optional[types.Part]:
app_name = agent_engine_id if agent_engine_id else app_name
artifact = artifact_service.load_artifact(
app_name=app_name,
user_id=user_id,
@ -500,6 +515,7 @@ def get_fast_api_app(
artifact_name: str,
version_id: int,
) -> Optional[types.Part]:
app_name = agent_engine_id if agent_engine_id else app_name
artifact = artifact_service.load_artifact(
app_name=app_name,
user_id=user_id,
@ -518,6 +534,7 @@ def get_fast_api_app(
def list_artifact_names(
app_name: str, user_id: str, session_id: str
) -> list[str]:
app_name = agent_engine_id if agent_engine_id else app_name
return artifact_service.list_artifact_keys(
app_name=app_name, user_id=user_id, session_id=session_id
)
@ -529,6 +546,7 @@ def get_fast_api_app(
def list_artifact_versions(
app_name: str, user_id: str, session_id: str, artifact_name: str
) -> list[int]:
app_name = agent_engine_id if agent_engine_id else app_name
return artifact_service.list_versions(
app_name=app_name,
user_id=user_id,
@ -542,6 +560,7 @@ def get_fast_api_app(
def delete_artifact(
app_name: str, user_id: str, session_id: str, artifact_name: str
):
app_name = agent_engine_id if agent_engine_id else app_name
artifact_service.delete_artifact(
app_name=app_name,
user_id=user_id,

View File

@ -124,6 +124,17 @@ class CodeExecutionUtils:
if not content or not content.parts:
return
# Extract the code from the executable code parts if there are no associated
# code execution result parts.
for idx, part in enumerate(content.parts):
if part.executable_code and (
idx == len(content.parts) - 1
or not content.parts[idx + 1].code_execution_result
):
content.parts = content.parts[: idx + 1]
return part.executable_code.code
# Extract the code from the text parts.
text_parts = [p for p in content.parts if p.text]
if not text_parts:
return

View File

@ -74,7 +74,7 @@ class BaseLlmFlow(ABC):
return
llm = self.__get_llm(invocation_context)
logger.info(
logger.debug(
'Establishing live connection for agent: %s with llm request: %s',
invocation_context.agent.name,
llm_request,

View File

@ -27,6 +27,7 @@ from ...events.event import Event
from ...models.llm_request import LlmRequest
from ._base_llm_processor import BaseLlmRequestProcessor
from .functions import remove_client_function_call_id
from .functions import REQUEST_EUC_FUNCTION_CALL_NAME
class _ContentLlmRequestProcessor(BaseLlmRequestProcessor):
@ -208,7 +209,9 @@ def _get_contents(
if not _is_event_belongs_to_branch(current_branch, event):
# Skip events that don't belong to the current branch.
continue
if _is_auth_event(event):
# skip auth event
continue
filtered_events.append(
_convert_foreign_event(event)
if _is_other_agent_reply(agent_name, event)
@ -368,3 +371,20 @@ def _is_event_belongs_to_branch(
if not invocation_branch or not event.branch:
return True
return invocation_branch.startswith(event.branch)
def _is_auth_event(event: Event) -> bool:
if not event.content.parts:
return False
for part in event.content.parts:
if (
part.function_call
and part.function_call.name == REQUEST_EUC_FUNCTION_CALL_NAME
):
return True
if (
part.function_response
and part.function_response.name == REQUEST_EUC_FUNCTION_CALL_NAME
):
return True
return False

View File

@ -32,6 +32,8 @@ from ...agents.invocation_context import InvocationContext
from ...auth.auth_tool import AuthToolArguments
from ...events.event import Event
from ...events.event_actions import EventActions
from ...telemetry import trace_tool_call
from ...telemetry import trace_tool_response
from ...telemetry import tracer
from ...tools.base_tool import BaseTool
from ...tools.tool_context import ToolContext
@ -114,7 +116,9 @@ def generate_auth_event(
invocation_id=invocation_context.invocation_id,
author=invocation_context.agent.name,
branch=invocation_context.branch,
content=types.Content(parts=parts),
content=types.Content(
parts=parts, role=function_response_event.content.role
),
long_running_tool_ids=long_running_tool_ids,
)
@ -186,6 +190,16 @@ async def handle_function_calls_async(
merged_event = merge_parallel_function_response_events(
function_response_events
)
if len(function_response_events) > 1:
# This is needed for debug traces of parallel calls; individual responses
# with tool.name are traced in __build_response_event
# (we drop tool.name from the span name here since this is the merged event)
with tracer.start_as_current_span('tool_response'):
trace_tool_response(
invocation_context=invocation_context,
event_id=merged_event.id,
function_response_event=merged_event,
)
return merged_event
@ -375,7 +389,8 @@ async def __call_tool_live(
invocation_context: InvocationContext,
) -> AsyncGenerator[Event, None]:
"""Calls the tool asynchronously (awaiting the coroutine)."""
with tracer.start_as_current_span(f'call_tool [{tool.name}]'):
with tracer.start_as_current_span(f'tool_call [{tool.name}]'):
trace_tool_call(args=args)
async for item in tool._call_live(
args=args,
tool_context=tool_context,
@ -390,7 +405,8 @@ async def __call_tool_async(
tool_context: ToolContext,
) -> Any:
"""Calls the tool."""
with tracer.start_as_current_span(f'call_tool [{tool.name}]'):
with tracer.start_as_current_span(f'tool_call [{tool.name}]'):
trace_tool_call(args=args)
return await tool.run_async(args=args, tool_context=tool_context)
@ -400,26 +416,35 @@ def __build_response_event(
tool_context: ToolContext,
invocation_context: InvocationContext,
) -> Event:
# Specs requires the result to be a dict.
if not isinstance(function_result, dict):
function_result = {'result': function_result}
with tracer.start_as_current_span(f'tool_response [{tool.name}]'):
# Specs require the result to be a dict.
if not isinstance(function_result, dict):
function_result = {'result': function_result}
part_function_response = types.Part.from_function_response(
name=tool.name, response=function_result
)
part_function_response.function_response.id = tool_context.function_call_id
part_function_response = types.Part.from_function_response(
name=tool.name, response=function_result
)
part_function_response.function_response.id = tool_context.function_call_id
content = types.Content(
role='user',
parts=[part_function_response],
)
return Event(
invocation_id=invocation_context.invocation_id,
author=invocation_context.agent.name,
content=content,
actions=tool_context.actions,
branch=invocation_context.branch,
)
content = types.Content(
role='user',
parts=[part_function_response],
)
function_response_event = Event(
invocation_id=invocation_context.invocation_id,
author=invocation_context.agent.name,
content=content,
actions=tool_context.actions,
branch=invocation_context.branch,
)
trace_tool_response(
invocation_context=invocation_context,
event_id=function_response_event.id,
function_response_event=function_response_event,
)
return function_response_event
def merge_parallel_function_response_events(

View File

@ -19,7 +19,6 @@ import logging
import sys
from typing import AsyncGenerator
from typing import cast
from typing import Generator
from typing import TYPE_CHECKING
from google.genai import Client

View File

@ -259,6 +259,10 @@ class Runner:
Yields:
The events generated by the agent.
.. warning::
This feature is **experimental** and its API or behavior may change
in future releases.
"""
# TODO: right now, only works for a single audio agent without FC.
invocation_context = self._new_invocation_context_for_live(
@ -416,9 +420,16 @@ class Runner:
if self.agent.sub_agents and live_request_queue:
if not run_config.response_modalities:
# default
run_config.response_modalities = ['AUDIO', 'TEXT']
run_config.response_modalities = ['AUDIO']
if not run_config.output_audio_transcription:
run_config.output_audio_transcription = (
types.AudioTranscriptionConfig()
)
elif 'TEXT' not in run_config.response_modalities:
run_config.response_modalities.append('TEXT')
if not run_config.output_audio_transcription:
run_config.output_audio_transcription = (
types.AudioTranscriptionConfig()
)
return self._new_invocation_context(
session,
live_request_queue=live_request_queue,
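The defaults above mean a live run with sub-agents now responds with audio only and relies on output audio transcription for text. A sketch of setting the same configuration explicitly (the `RunConfig` import path is assumed):

```python
# Sketch only: explicit equivalent of the live-run defaults applied above.
from google.genai import types
from google.adk.agents.run_config import RunConfig

run_config = RunConfig(
    response_modalities=["AUDIO"],
    output_audio_transcription=types.AudioTranscriptionConfig(),
)
```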

View File

@ -28,12 +28,64 @@ from google.genai import types
from opentelemetry import trace
from .agents.invocation_context import InvocationContext
from .events.event import Event
from .models.llm_request import LlmRequest
from .models.llm_response import LlmResponse
tracer = trace.get_tracer('gcp.vertex.agent')
def trace_tool_call(
args: dict[str, Any],
):
"""Traces tool call.
Args:
args: The arguments to the tool call.
"""
span = trace.get_current_span()
span.set_attribute('gen_ai.system', 'gcp.vertex.agent')
span.set_attribute('gcp.vertex.agent.tool_call_args', json.dumps(args))
def trace_tool_response(
invocation_context: InvocationContext,
event_id: str,
function_response_event: Event,
):
"""Traces tool response event.
This function records details about the tool response event as attributes on
the current OpenTelemetry span.
Args:
invocation_context: The invocation context for the current agent run.
event_id: The ID of the event.
function_response_event: The function response event which can be either
merged function response for parallel function calls or individual
function response for sequential function calls.
"""
span = trace.get_current_span()
span.set_attribute('gen_ai.system', 'gcp.vertex.agent')
span.set_attribute(
'gcp.vertex.agent.invocation_id', invocation_context.invocation_id
)
span.set_attribute('gcp.vertex.agent.event_id', event_id)
span.set_attribute(
'gcp.vertex.agent.tool_response',
function_response_event.model_dump_json(exclude_none=True),
)
# Set an empty llm request and response (the UI expects these) even though
# they are not applicable for tool_response.
span.set_attribute('gcp.vertex.agent.llm_request', '{}')
span.set_attribute(
'gcp.vertex.agent.llm_response',
'{}',
)
def trace_call_llm(
invocation_context: InvocationContext,
event_id: str,

View File

@ -13,4 +13,4 @@
# limitations under the License.
# version: date+base_cl
__version__ = "0.0.2"
__version__ = "0.1.0"