fix issue with missing function responses

Your Name
2026-05-14 23:13:01 +00:00
parent 5e34ea4be3
commit 665b5ca6c7
4 changed files with 203 additions and 2 deletions
@@ -386,10 +386,21 @@ export class ContextManager {
this.tracer.logEvent('ContextManager', 'Finished rendering');
-const hardenedHistory = hardenHistory(renderedHistory, {
+// We must temporarily append the pendingRequest (if any) before hardening.
+// Otherwise, the hardener will see dangling functionCalls and inject sentinels
+// even though the pendingRequest provides the required functionResponses.
+const fullHistoryToHarden = pendingRequest
+  ? [...renderedHistory, pendingRequest]
+  : renderedHistory;
+const hardenedHistory = hardenHistory(fullHistoryToHarden, {
sentinels: this.sidecar.sentinels,
});
+if (pendingRequest) {
+  hardenedHistory.pop(); // Remove the pending request from the final output
+}
const apiHistory = hardenedHistory.map((h) => h.content);
if (header) {
apiHistory.unshift(header);
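A note on why the temporary append matters: the hardener pairs each functionCall emitted by a model turn with a functionResponse in a following user turn, and injects a sentinel response for any call left unpaired. The sketch below is a hypothetical illustration of the shapes involved, not part of the commit; it reuses the HistoryTurn shape and import path from the test file further down.
import type { HistoryTurn } from '../../core/agentChatHistory.js';
// The rendered history can end with a model turn whose functionCall has no
// matching functionResponse yet...
const renderedHistory: HistoryTurn[] = [
  {
    id: 'model_turn',
    content: {
      role: 'model',
      parts: [
        { functionCall: { id: 'call_1', name: 'read_file', args: { path: 'A.txt' } } },
      ],
    },
  },
];
// ...because the matching response is still held separately as the pendingRequest.
const pendingRequest: HistoryTurn = {
  id: 'pending_resp',
  content: {
    role: 'user',
    parts: [
      { functionResponse: { id: 'call_1', name: 'read_file', response: { content: 'File A' } } },
    ],
  },
};
// Hardening renderedHistory alone would treat call_1 as dangling and inject a
// sentinel; appending pendingRequest first lets the hardener pair it, and the
// pop() afterwards keeps the pending turn out of the final output.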
@@ -0,0 +1,110 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { ContextGraphMapper } from './mapper.js';
import type { HistoryTurn } from '../../core/agentChatHistory.js';
import { hardenHistory } from '../../utils/historyHardening.js';
describe('ContextGraphMapper (Round-Trip Fidelity)', () => {
it('should flawlessly round-trip a complex history containing parallel tool calls and responses', () => {
// 1. Define a complex, worst-case scenario history
const originalHistory: HistoryTurn[] = [
{
id: 'system_prompt_id',
content: {
role: 'user',
parts: [{ text: '<session_context>\nSystem Prompt here' }],
},
},
{
id: 'user_turn_1',
content: {
role: 'user',
parts: [{ text: 'Please read file A and file B at the same time.' }],
},
},
{
id: 'model_turn_1',
content: {
role: 'model',
parts: [
{ text: 'I will read both files concurrently.' },
{
functionCall: {
id: 'call_A',
name: 'read_file',
args: { path: 'A.txt' },
},
thoughtSignature: 'synthetic_sig_xyz',
},
{
functionCall: {
id: 'call_B',
name: 'read_file',
args: { path: 'B.txt' },
},
},
],
},
},
// Note: GeminiChat records these as separate sequential user turns initially
{
id: 'tool_resp_B_id',
content: {
role: 'user',
parts: [
{
functionResponse: {
id: 'call_B',
name: 'read_file',
response: { content: 'File B' },
},
},
],
},
},
{
id: 'tool_resp_A_id',
content: {
role: 'user',
parts: [
{
functionResponse: {
id: 'call_A',
name: 'read_file',
response: { content: 'File A' },
},
},
],
},
},
];
// 2. We harden the original history first. The core agent loop feeds the hardener the pure history.
// We want our round-tripped history to match what the hardener WOULD have produced natively.
const hardenedOriginal = hardenHistory(originalHistory);
// 3. Translate History -> Graph
const mapper = new ContextGraphMapper();
// Simulate the HistoryObserver capturing the push
const nodes = mapper.applyEvent({
type: 'SYNC_FULL',
payload: originalHistory,
});
// 4. Translate Graph -> History
const reconstructedHistory = mapper.fromGraph(nodes);
// 5. Harden the reconstructed history (as the ContextManager does before sending to API)
const hardenedReconstructed = hardenHistory(reconstructedHistory);
// 6. Assert Absolute Equality
// The round-trip through the Context Graph and Hardener must exactly equal
// the original history put through the Hardener.
expect(hardenedReconstructed).toEqual(hardenedOriginal);
});
});
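Stated compactly, the test above checks that hardening commutes with a graph round-trip. A minimal sketch of that invariant as a standalone predicate follows (a hypothetical helper, not part of the commit; it reuses applyEvent/fromGraph and hardenHistory exactly as the test does, with a naive JSON comparison standing in for expect().toEqual()).
import { ContextGraphMapper } from './mapper.js';
import type { HistoryTurn } from '../../core/agentChatHistory.js';
import { hardenHistory } from '../../utils/historyHardening.js';
// True when a history survives History -> Graph -> History unchanged once
// both sides are normalized by the hardener.
function roundTripsFaithfully(history: HistoryTurn[]): boolean {
  const mapper = new ContextGraphMapper();
  const nodes = mapper.applyEvent({ type: 'SYNC_FULL', payload: history });
  const reconstructed = mapper.fromGraph(nodes);
  return (
    JSON.stringify(hardenHistory(reconstructed)) ===
    JSON.stringify(hardenHistory(history))
  );
}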
@@ -42,7 +42,9 @@ export class AdaptiveTokenCalculator implements AdvancedTokenCalculator {
private handleGroundTruth(actualTokens: number, promptBaseUnits: number) {
if (promptBaseUnits <= 0) return;
-const overheadTokens = this.getOverheadTokens ? this.getOverheadTokens() : 0;
+const overheadTokens = this.getOverheadTokens
+  ? this.getOverheadTokens()
+  : 0;
// The Gemini API token count includes the static overhead (system instruction + tools)
// and the dynamic chat history (which we measure as promptBaseUnits).
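The rest of handleGroundTruth is not shown in this hunk, but the comment above implies the API's ground-truth count is split into a static and a dynamic component before calibration. A hypothetical sketch of that arithmetic follows (the function name and the clamping are assumptions for illustration, not the calculator's actual code).
// actualTokens:    total prompt token count reported by the Gemini API
// overheadTokens:  static overhead (system instruction + tool declarations)
// promptBaseUnits: our own measurement of the dynamic chat history
function estimateTokensPerUnit(
  actualTokens: number,
  overheadTokens: number,
  promptBaseUnits: number,
): number {
  // Attribute only the dynamic portion of the count to the base units we
  // measured ourselves; clamp so a stale overhead estimate cannot go negative.
  const dynamicTokens = Math.max(actualTokens - overheadTokens, 0);
  return dynamicTokens / promptBaseUnits;
}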
@@ -134,6 +134,84 @@ describe('hardenHistory', () => {
expect(hardened[2].id).toBe(deriveStableId(['2', 'sentinel_resp']));
});
it('should successfully match parallel tool calls and responses even if responses are originally split across separate user turns', () => {
const history: HistoryTurn[] = [
{ id: '1', content: { role: 'user', parts: [{ text: 'do it' }] } },
{
id: '2',
content: {
role: 'model',
parts: [
{
functionCall: { id: 'call_1', name: 'toolA', args: {} },
thoughtSignature: 'sig',
},
{ functionCall: { id: 'call_2', name: 'toolB', args: {} } },
],
},
},
// Responses arrive as separate user turns
{
id: '3',
content: {
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'toolA',
response: { ok: true },
},
},
],
},
},
{
id: '4',
content: {
role: 'user',
parts: [
{
functionResponse: {
id: 'call_2',
name: 'toolB',
response: { ok: true },
},
},
],
},
},
];
// The hardener should coalesce Turn 3 and Turn 4 *before* it tries to pair them with Turn 2.
// Otherwise, it would look at Turn 3, see 'call_2' is missing, inject a sentinel for 'call_2',
// and then look at Turn 4 and consider 'call_2' to be orphaned.
const hardened = hardenHistory(history);
// Total turns: User(1), Model(2), User(3+4 merged)
expect(hardened.length).toBe(3);
const userResponseTurn = hardened[2];
expect(userResponseTurn.content.role).toBe('user');
expect(userResponseTurn.content.parts).toHaveLength(2);
// Verify no sentinels were injected and original responses were preserved
expect(userResponseTurn.content.parts![0].functionResponse?.id).toBe(
'call_1',
);
expect(userResponseTurn.content.parts![1].functionResponse?.id).toBe(
'call_2',
);
// Ensure no error properties exist
expect(
userResponseTurn.content.parts![0].functionResponse?.response,
).toEqual({ ok: true });
expect(
userResponseTurn.content.parts![1].functionResponse?.response,
).toEqual({ ok: true });
});
it('should drop orphaned functionResponses', () => {
const history: HistoryTurn[] = [
{ id: '1', content: { role: 'user', parts: [{ text: 'hello' }] } },