From f700c923d9e0b78a1b625f79713a66efea7d8502 Mon Sep 17 00:00:00 2001
From: krishdef7 <157892833+krishdef7@users.noreply.github.com>
Date: Fri, 27 Feb 2026 04:09:36 +0530
Subject: [PATCH] fix(core): flush transcript for pure tool-call responses to
 ensure BeforeTool hooks see complete state (#20419)

Co-authored-by: Bryan Morgan
---
 packages/core/src/core/geminiChat.test.ts | 53 +++++++++++++++++++++++
 packages/core/src/core/geminiChat.ts      |  8 +++-
 2 files changed, 59 insertions(+), 2 deletions(-)

diff --git a/packages/core/src/core/geminiChat.test.ts b/packages/core/src/core/geminiChat.test.ts
index bfcb803a95..770a594bda 100644
--- a/packages/core/src/core/geminiChat.test.ts
+++ b/packages/core/src/core/geminiChat.test.ts
@@ -1032,6 +1032,59 @@ describe('GeminiChat', () => {
         LlmRole.MAIN,
       );
     });
+
+    it('should flush transcript before tool dispatch for pure tool call with no text or thoughts', async () => {
+      const pureToolCallStream = (async function* () {
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [
+                  {
+                    functionCall: {
+                      name: 'read_file',
+                      args: { path: 'test.py' },
+                    },
+                  },
+                ],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+
+      vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
+        pureToolCallStream,
+      );
+
+      const { default: fs } = await import('node:fs');
+      const writeFileSync = vi.mocked(fs.writeFileSync);
+      const writeCountBefore = writeFileSync.mock.calls.length;
+
+      const stream = await chat.sendMessageStream(
+        { model: 'test-model' },
+        'analyze test.py',
+        'prompt-id-pure-tool-flush',
+        new AbortController().signal,
+        LlmRole.MAIN,
+      );
+      for await (const _ of stream) {
+        // consume
+      }
+
+      const newWrites = writeFileSync.mock.calls.slice(writeCountBefore);
+      expect(newWrites.length).toBeGreaterThan(0);
+
+      const lastWriteData = JSON.parse(
+        newWrites[newWrites.length - 1][1] as string,
+      ) as { messages: Array<{ type: string }> };
+
+      const geminiMessages = lastWriteData.messages.filter(
+        (m) => m.type === 'gemini',
+      );
+      expect(geminiMessages.length).toBeGreaterThan(0);
+    });
   });
 
   describe('addHistory', () => {
diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts
index b7319c8afd..6814f31402 100644
--- a/packages/core/src/core/geminiChat.ts
+++ b/packages/core/src/core/geminiChat.ts
@@ -818,6 +818,7 @@ export class GeminiChat {
 
     const modelResponseParts: Part[] = [];
     let hasToolCall = false;
+    let hasThoughts = false;
     let finishReason: FinishReason | undefined;
 
     for await (const chunk of streamResponse) {
@@ -834,6 +835,7 @@ export class GeminiChat {
       if (content?.parts) {
         if (content.parts.some((part) => part.thought)) {
           // Record thoughts
+          hasThoughts = true;
           this.recordThoughtFromContent(content);
         }
         if (content.parts.some((part) => part.functionCall)) {
@@ -901,8 +903,10 @@ export class GeminiChat {
       .join('')
       .trim();
 
-    // Record model response text from the collected parts
-    if (responseText) {
+    // Record model response text from the collected parts.
+    // Also flush when there are thoughts or a tool call (even with no text)
+    // so that BeforeTool hooks always see the latest transcript state.
+    if (responseText || hasThoughts || hasToolCall) {
       this.chatRecordingService.recordMessage({
         model,
         type: 'gemini',