From 78d5929d04cddace4a4e69ddf1b0bd4acaad8abe Mon Sep 17 00:00:00 2001 From: Hiroaki Sano Date: Tue, 16 Dec 2025 22:31:52 +0900 Subject: [PATCH 1/3] fix: Fix infinite loop when using code_executor The code_executor was causing an infinite loop because: 1. `_contains_empty_content()` was not checking for `executable_code` and `code_execution_result` parts, causing CODE_EXECUTION_RESULT events to be filtered out as "empty content". 2. Consecutive model contents (executable_code and code_execution_result) were sent as separate Content objects, but the Gemini API expects them to be in the same Content with multiple parts. This fix: - Adds checks for `executable_code` and `code_execution_result` in `_contains_empty_content()` - Merges consecutive model contents into a single Content object Closes #3921 --- src/google/adk/flows/llm_flows/contents.py | 21 +++++- .../flows/llm_flows/test_contents.py | 73 +++++++++++++++++++ 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py index fefa014c45..df2fb1dc87 100644 --- a/src/google/adk/flows/llm_flows/contents.py +++ b/src/google/adk/flows/llm_flows/contents.py @@ -246,6 +246,8 @@ def _contains_empty_content(event: Event) -> bool: and not p.file_data and not p.function_call and not p.function_response + and not p.executable_code + and not p.code_execution_result for p in [event.content.parts[0]] ) ) and (not event.output_transcription and not event.input_transcription) @@ -445,7 +447,24 @@ def _get_contents( if content: remove_client_function_call_id(content) contents.append(content) - return contents + + # Merge consecutive contents with the same role. + # This is necessary for code execution where executable_code and + # code_execution_result are separate events but need to be in the same + # Content for the LLM to understand the execution flow. 
+ merged_contents = [] + for content in contents: + if ( + merged_contents + and merged_contents[-1].role == content.role + and content.role == 'model' + ): + # Merge parts into the previous content + merged_contents[-1].parts.extend(content.parts) + else: + merged_contents.append(content) + + return merged_contents def _get_current_turn_contents( diff --git a/tests/unittests/flows/llm_flows/test_contents.py b/tests/unittests/flows/llm_flows/test_contents.py index b2aa91dbee..af09ed5084 100644 --- a/tests/unittests/flows/llm_flows/test_contents.py +++ b/tests/unittests/flows/llm_flows/test_contents.py @@ -535,3 +535,76 @@ async def test_events_with_empty_content_are_skipped(): role="user", ), ] + + +@pytest.mark.asyncio +async def test_events_with_code_execution_are_included(): + """Test that events with executable_code and code_execution_result are included.""" + agent = Agent(model="gemini-2.5-flash", name="test_agent") + llm_request = LlmRequest(model="gemini-2.5-flash") + invocation_context = await testing_utils.create_invocation_context( + agent=agent + ) + + events = [ + Event( + invocation_id="inv1", + author="user", + content=types.UserContent("Run some code"), + ), + # Event with executable_code part + Event( + invocation_id="inv2", + author="test_agent", + content=types.Content( + parts=[ + types.Part( + executable_code=types.ExecutableCode( + code="print('hello')", + language="PYTHON", + ) + ) + ], + role="model", + ), + ), + # Event with code_execution_result part + Event( + invocation_id="inv3", + author="test_agent", + content=types.Content( + parts=[ + types.Part( + code_execution_result=types.CodeExecutionResult( + outcome="OUTCOME_OK", + output="hello", + ) + ) + ], + role="model", + ), + ), + Event( + invocation_id="inv4", + author="user", + content=types.UserContent("What was the result?"), + ), + ] + invocation_context.session.events = events + + # Process the request + async for _ in contents.request_processor.run_async( + invocation_context, 
llm_request + ): + pass + + # Verify events with executable_code and code_execution_result are included + # Consecutive model contents are merged into one content with multiple parts + assert len(llm_request.contents) == 3 + assert llm_request.contents[0] == types.UserContent("Run some code") + # The merged model content should have both executable_code and code_execution_result + assert llm_request.contents[1].role == "model" + assert len(llm_request.contents[1].parts) == 2 + assert llm_request.contents[1].parts[0].executable_code is not None + assert llm_request.contents[1].parts[1].code_execution_result is not None + assert llm_request.contents[2] == types.UserContent("What was the result?") From fa8c7ce70e0675ee2686c8229e563b19fb654649 Mon Sep 17 00:00:00 2001 From: Hiroaki Sano Date: Wed, 17 Dec 2025 10:38:33 +0900 Subject: [PATCH 2/3] fix: Only merge consecutive model contents for code execution parts --- src/google/adk/flows/llm_flows/contents.py | 28 +++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py index df2fb1dc87..fa14b2426b 100644 --- a/src/google/adk/flows/llm_flows/contents.py +++ b/src/google/adk/flows/llm_flows/contents.py @@ -448,7 +448,7 @@ def _get_contents( remove_client_function_call_id(content) contents.append(content) - # Merge consecutive contents with the same role. + # Merge consecutive model contents when they contain code execution parts. # This is necessary for code execution where executable_code and # code_execution_result are separate events but need to be in the same # Content for the LLM to understand the execution flow. 
@@ -458,6 +458,7 @@ def _get_contents( merged_contents and merged_contents[-1].role == content.role and content.role == 'model' + and _should_merge_code_execution_content(merged_contents[-1], content) ): # Merge parts into the previous content merged_contents[-1].parts.extend(content.parts) @@ -467,6 +468,31 @@ def _get_contents( return merged_contents +def _content_has_code_execution_part(content: types.Content) -> bool: + """Check if a content has executable_code or code_execution_result parts.""" + if not content.parts: + return False + for part in content.parts: + if part.executable_code or part.code_execution_result: + return True + return False + + +def _should_merge_code_execution_content( + prev_content: types.Content, curr_content: types.Content +) -> bool: + """Determine if two consecutive model contents should be merged. + + Only merge when the previous content has executable_code or the current + content has code_execution_result. This ensures proper code execution flow + is maintained without affecting other consecutive model messages. + """ + return ( + _content_has_code_execution_part(prev_content) + or _content_has_code_execution_part(curr_content) + ) + + def _get_current_turn_contents( current_branch: Optional[str], events: list[Event], agent_name: str = '' ) -> list[types.Content]: From ea4515df284639ada5ca8481d6e40321aa1a0cd7 Mon Sep 17 00:00:00 2001 From: Hiroaki Sano Date: Thu, 18 Dec 2025 10:28:28 +0900 Subject: [PATCH 3/3] chore: Run autoformat.sh --- src/google/adk/flows/llm_flows/contents.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py index fa14b2426b..0209e46159 100644 --- a/src/google/adk/flows/llm_flows/contents.py +++ b/src/google/adk/flows/llm_flows/contents.py @@ -487,10 +487,9 @@ def _should_merge_code_execution_content( content has code_execution_result. 
This ensures proper code execution flow is maintained without affecting other consecutive model messages. """ - return ( - _content_has_code_execution_part(prev_content) - or _content_has_code_execution_part(curr_content) - ) + return _content_has_code_execution_part( + prev_content + ) or _content_has_code_execution_part(curr_content) def _get_current_turn_contents(