From a7059e37421078eae4fa9d637adcaf23cb0e07c1 Mon Sep 17 00:00:00 2001 From: sreeharsha1902 <53158304+sreeharsha1902@users.noreply.github.com> Date: Thu, 18 Dec 2025 15:42:40 -0500 Subject: [PATCH 1/4] fix(core): Propagate RunConfig.custom_metadata to Event.custom_metadata This change fixes issue #3953 where custom_metadata set in RunConfig was not being propagated to Event objects, making it impossible to track custom metadata across invocations. Changes: - Add custom_metadata propagation in Event creation for regular model response events - Add custom_metadata propagation in Event creation for live streaming events This ensures that custom metadata (e.g., request IDs, user sessions) is properly preserved across all events in agent interactions. Fixes #3953 --- src/google/adk/flows/llm_flows/base_llm_flow.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index 824cd26be1..4cf8dbe00f 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -324,6 +324,7 @@ def get_author_for_event(llm_response): id=Event.new_id(), invocation_id=invocation_context.invocation_id, author=get_author_for_event(llm_response), + custom_metadata=invocation_context.run_config.custom_metadata, ) async with Aclosing( @@ -438,6 +439,7 @@ async def _run_one_step_async( invocation_id=invocation_context.invocation_id, author=invocation_context.agent.name, branch=invocation_context.branch, + custom_metadata=invocation_context.run_config.custom_metadata, ) async with Aclosing( self._call_llm_async( From 6b4a3a307238afdfa33146bc7b2d985667ab56c9 Mon Sep 17 00:00:00 2001 From: sreeharsha1902 <53158304+sreeharsha1902@users.noreply.github.com> Date: Thu, 18 Dec 2025 15:42:53 -0500 Subject: [PATCH 2/4] fix(models): Filter thought parts in LiteLLM _get_content() This change fixes issue #3948 where thought parts (reasoning content with part.thought == True) were being included in subsequent conversation turns, causing the model's previous reasoning to be replayed as input. Changes: - Add filtering logic in _get_content() to skip thought parts when constructing message content for subsequent turns - Add clear documentation explaining why thought parts should be filtered This prevents reasoning replay and improves multi-turn conversation quality for LiteLLM-based agents. Fixes #3948 --- src/google/adk/models/lite_llm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/google/adk/models/lite_llm.py b/src/google/adk/models/lite_llm.py index 140473982f..d217d78648 100644 --- a/src/google/adk/models/lite_llm.py +++ b/src/google/adk/models/lite_llm.py @@ -533,6 +533,8 @@ async def _get_content( content_objects = [] for part in parts: + if part.thought: + continue if part.text: if len(parts) == 1: return part.text From 19e5aa2fad56298dc1d0c417b4c78bf750293335 Mon Sep 17 00:00:00 2001 From: sreeharsha1902 <53158304+sreeharsha1902@users.noreply.github.com> Date: Thu, 18 Dec 2025 15:50:24 -0500 Subject: [PATCH 3/4] refactor: Use getattr for safer attribute access Address code review feedback to make the code more robust by using getattr() for accessing optional attributes: - Use getattr for run_config.custom_metadata access to prevent AttributeError when run_config might be None - Use getattr for part.thought access to prevent AttributeError when the thought attribute is not present on Part objects This makes the code more defensive and resilient to edge cases while maintaining the same functionality. All tests continue to pass (197 tests in litellm and base_llm_flow). --- src/google/adk/flows/llm_flows/base_llm_flow.py | 4 ++-- src/google/adk/models/lite_llm.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index 4cf8dbe00f..f69cd6c2cd 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -324,7 +324,7 @@ def get_author_for_event(llm_response): id=Event.new_id(), invocation_id=invocation_context.invocation_id, author=get_author_for_event(llm_response), - custom_metadata=invocation_context.run_config.custom_metadata, + custom_metadata=getattr(invocation_context.run_config, 'custom_metadata', None), ) async with Aclosing( @@ -439,7 +439,7 @@ async def _run_one_step_async( invocation_id=invocation_context.invocation_id, author=invocation_context.agent.name, branch=invocation_context.branch, - custom_metadata=invocation_context.run_config.custom_metadata, + custom_metadata=getattr(invocation_context.run_config, 'custom_metadata', None), ) async with Aclosing( self._call_llm_async( diff --git a/src/google/adk/models/lite_llm.py b/src/google/adk/models/lite_llm.py index d217d78648..7651e39788 100644 --- a/src/google/adk/models/lite_llm.py +++ b/src/google/adk/models/lite_llm.py @@ -533,7 +533,9 @@ async def _get_content( content_objects = [] for part in parts: - if part.thought: + # Skip thought parts - these are model reasoning and should not be + # fed back as input in subsequent turns + if getattr(part, 'thought', False): continue if part.text: if len(parts) == 1: From 26b665ca7c966ab88849cbfd7e0af752a2c32b3d Mon Sep 17 00:00:00 2001 From: sreeharsha1902 <53158304+sreeharsha1902@users.noreply.github.com> Date: Fri, 19 Dec 2025 08:18:45 -0500 Subject: [PATCH 4/4] chore: Address code review feedback and apply autoformatting - Use getattr() for safer attribute access in base_llm_flow.py - Apply autoformatting to comply with project style guidelines --- contributing/samples/gepa/experiment.py | 1 - contributing/samples/gepa/run_experiment.py | 1 - src/google/adk/flows/llm_flows/base_llm_flow.py | 8 ++++++-- src/google/adk/models/lite_llm.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/contributing/samples/gepa/experiment.py b/contributing/samples/gepa/experiment.py index 2f5d03a772..f68b349d9c 100644 --- a/contributing/samples/gepa/experiment.py +++ b/contributing/samples/gepa/experiment.py @@ -43,7 +43,6 @@ from tau_bench.types import EnvRunResult from tau_bench.types import RunConfig import tau_bench_agent as tau_bench_agent_lib - import utils diff --git a/contributing/samples/gepa/run_experiment.py b/contributing/samples/gepa/run_experiment.py index cfd850b3a3..1bc4ee58c8 100644 --- a/contributing/samples/gepa/run_experiment.py +++ b/contributing/samples/gepa/run_experiment.py @@ -25,7 +25,6 @@ from absl import flags import experiment from google.genai import types - import utils _OUTPUT_DIR = flags.DEFINE_string( diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index f69cd6c2cd..05061fad76 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -324,7 +324,9 @@ def get_author_for_event(llm_response): id=Event.new_id(), invocation_id=invocation_context.invocation_id, author=get_author_for_event(llm_response), - custom_metadata=getattr(invocation_context.run_config, 'custom_metadata', None), + custom_metadata=getattr( + invocation_context.run_config, 'custom_metadata', None + ), ) async with Aclosing( @@ -439,7 +441,9 @@ async def _run_one_step_async( invocation_id=invocation_context.invocation_id, author=invocation_context.agent.name, branch=invocation_context.branch, - custom_metadata=getattr(invocation_context.run_config, 'custom_metadata', None), + custom_metadata=getattr( + invocation_context.run_config, 'custom_metadata', None + ), ) async with Aclosing( self._call_llm_async( diff --git a/src/google/adk/models/lite_llm.py b/src/google/adk/models/lite_llm.py index 7651e39788..28426e0139 100644 --- a/src/google/adk/models/lite_llm.py +++ b/src/google/adk/models/lite_llm.py @@ -535,7 +535,7 @@ async def _get_content( for part in parts: # Skip thought parts - these are model reasoning and should not be # fed back as input in subsequent turns - if getattr(part, 'thought', False): + if getattr(part, "thought", False): continue if part.text: if len(parts) == 1: