mirror of https://github.com/EvolutionAPI/adk-python.git
synced 2025-07-13 15:14:50 -06:00

Accounting for "references" to be absent in eval files in older format.
PiperOrigin-RevId: 764454937
This commit is contained in:
parent 958c18db2b
commit 447798d573

@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import importlib
 from typing import Any
 from typing import Optional
@@ -56,7 +58,7 @@ class EvaluationGenerator:
     """Returns evaluation responses for the given dataset and agent.
 
     Args:
-      eval_dataset: The dataset that needs to be scraped for responses.
+      eval_set: The eval set that needs to be scraped for responses.
       agent_module_path: Path to the module that contains the root agent.
       repeat_num: Number of time the eval dataset should be repeated. This is
         usually done to remove uncertainty that a single run may bring.
@@ -209,7 +211,8 @@ class EvaluationGenerator:
     """Process the queries using the existing session data without invoking the runner."""
     responses = data.copy()
 
-    # Iterate through the provided queries and align them with the session events
+    # Iterate through the provided queries and align them with the session
+    # events
     for index, eval_entry in enumerate(responses):
      query = eval_entry["query"]
      actual_tool_uses = []
@@ -241,5 +244,3 @@ class EvaluationGenerator:
      responses[index]["actual_tool_use"] = actual_tool_uses
      responses[index]["response"] = response
-    return responses
-    return responses
     return responses

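Both touched modules also gain from __future__ import annotations. As a side note, here is a minimal hypothetical sketch (not part of this commit) of what that import enables: with postponed annotation evaluation (PEP 563), annotations are stored as strings and never evaluated at import time, so built-in generics such as dict[str, Any] work even on Python versions older than 3.9.

from __future__ import annotations

from typing import Any


# Hypothetical example module: the annotation below is never evaluated at
# definition time, so dict[str, Any] is safe on Python 3.8, where
# subscripting the built-in dict would otherwise raise a TypeError.
def load_eval_entry(raw: dict[str, Any]) -> dict[str, Any]:
  return dict(raw)
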
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import json
 import logging
 import os
@@ -39,9 +41,9 @@ _EVAL_SET_FILE_EXTENSION = ".evalset.json"
 def _convert_invocation_to_pydantic_schema(
     invocation_in_json_format: dict[str, Any],
 ) -> Invocation:
-  """Converts an invocation from old json format to new Pydantic Schema"""
+  """Converts an invocation from old json format to new Pydantic Schema."""
   query = invocation_in_json_format["query"]
-  reference = invocation_in_json_format["reference"]
+  reference = invocation_in_json_format.get("reference", "")
   expected_tool_use = []
   expected_intermediate_agent_responses = []