mirror of https://github.com/google-gemini/gemini-cli.git
fix(core): attempt compression before context overflow check (#16914)
@@ -13,8 +13,16 @@ import { getCompressionPrompt } from '../core/prompts.js';
 import { getResponseText } from '../utils/partUtils.js';
 import { logChatCompression } from '../telemetry/loggers.js';
 import { makeChatCompressionEvent } from '../telemetry/types.js';
+import {
+  saveTruncatedToolOutput,
+  formatTruncatedToolOutput,
+} from '../utils/fileUtils.js';
+import { debugLogger } from '../utils/debugLogger.js';
 import { getInitialChatHistory } from '../utils/environmentContext.js';
-import { calculateRequestTokenCount } from '../utils/tokenCalculation.js';
+import {
+  calculateRequestTokenCount,
+  estimateTokenCountSync,
+} from '../utils/tokenCalculation.js';
 import {
   DEFAULT_GEMINI_FLASH_LITE_MODEL,
   DEFAULT_GEMINI_FLASH_MODEL,
@@ -36,6 +44,16 @@ export const DEFAULT_COMPRESSION_TOKEN_THRESHOLD = 0.5;
  */
 export const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
 
+/**
+ * The budget for function response tokens in the preserved history.
+ */
+export const COMPRESSION_FUNCTION_RESPONSE_TOKEN_BUDGET = 50_000;
+
+/**
+ * The number of lines to keep when truncating a function response during compression.
+ */
+export const COMPRESSION_TRUNCATE_LINES = 30;
+
 /**
  * Returns the index of the oldest item to keep when compressing. May return
  * contents.length which indicates that everything should be compressed.
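For orientation, here is a minimal sketch of how COMPRESSION_PRESERVE_THRESHOLD feeds the split-point computation. The arithmetic is illustrative only; the real findCompressSplitPoint operates on Content[] and may adjust the index to land on a valid message boundary.

// Illustrative sketch, not the upstream implementation.
const COMPRESSION_PRESERVE_THRESHOLD = 0.3;

function approximateSplitIndex(historyLength: number): number {
  // Callers pass 1 - COMPRESSION_PRESERVE_THRESHOLD, i.e. compress ~70%
  // of the history and preserve the newest ~30%.
  const compressFraction = 1 - COMPRESSION_PRESERVE_THRESHOLD;
  return Math.floor(historyLength * compressFraction);
}

// A 10-message history: messages 0-6 get summarized, 7-9 are preserved.
console.log(approximateSplitIndex(10)); // 7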
@@ -101,6 +119,119 @@ export function modelStringToModelConfigAlias(model: string): string {
   }
 }
+
+/**
+ * Processes the chat history to ensure function responses don't exceed a specific token budget.
+ *
+ * This function implements a "Reverse Token Budget" strategy:
+ * 1. It iterates through the history from the most recent turn to the oldest.
+ * 2. It keeps a running tally of tokens used by function responses.
+ * 3. Recent tool outputs are preserved in full to maintain high-fidelity context for the current turn.
+ * 4. Once the budget (COMPRESSION_FUNCTION_RESPONSE_TOKEN_BUDGET) is exceeded, any older large
+ *    tool responses are truncated to their last 30 lines and saved to a temporary file.
+ *
+ * This ensures that compression effectively reduces context size even when recent turns
+ * contain massive tool outputs (like large grep results or logs).
+ */
+async function truncateHistoryToBudget(
+  history: Content[],
+  config: Config,
+): Promise<Content[]> {
+  let functionResponseTokenCounter = 0;
+  const truncatedHistory: Content[] = [];
+
+  // Iterate backwards: newest messages first to prioritize their context.
+  for (let i = history.length - 1; i >= 0; i--) {
+    const content = history[i];
+    const newParts = [];
+
+    if (content.parts) {
+      // Process parts of the message backwards as well.
+      for (let j = content.parts.length - 1; j >= 0; j--) {
+        const part = content.parts[j];
+
+        if (part.functionResponse) {
+          const responseObj = part.functionResponse.response;
+          // Ensure we have a string representation to truncate.
+          // If the response is an object, we try to extract a primary string field (output or content).
+          let contentStr: string;
+          if (typeof responseObj === 'string') {
+            contentStr = responseObj;
+          } else if (responseObj && typeof responseObj === 'object') {
+            if (
+              'output' in responseObj &&
+              typeof responseObj['output'] === 'string'
+            ) {
+              contentStr = responseObj['output'];
+            } else if (
+              'content' in responseObj &&
+              typeof responseObj['content'] === 'string'
+            ) {
+              contentStr = responseObj['content'];
+            } else {
+              contentStr = JSON.stringify(responseObj, null, 2);
+            }
+          } else {
+            contentStr = JSON.stringify(responseObj, null, 2);
+          }
+
+          const tokens = estimateTokenCountSync([{ text: contentStr }]);
+
+          if (
+            functionResponseTokenCounter + tokens >
+            COMPRESSION_FUNCTION_RESPONSE_TOKEN_BUDGET
+          ) {
+            try {
+              // Budget exceeded: Truncate this response.
+              const { outputFile } = await saveTruncatedToolOutput(
+                contentStr,
+                part.functionResponse.name ?? 'unknown_tool',
+                config.getNextCompressionTruncationId(),
+                config.storage.getProjectTempDir(),
+              );
+
+              // Prepare an honest, readable snippet of the tail.
+              const truncatedMessage = formatTruncatedToolOutput(
+                contentStr,
+                outputFile,
+                COMPRESSION_TRUNCATE_LINES,
+              );
+
+              newParts.unshift({
+                functionResponse: {
+                  ...part.functionResponse,
+                  response: { output: truncatedMessage },
+                },
+              });
+
+              // Count the small truncated placeholder towards the budget.
+              functionResponseTokenCounter += estimateTokenCountSync([
+                { text: truncatedMessage },
+              ]);
+            } catch (error) {
+              // Fallback: if truncation fails, keep the original part to avoid data loss in the chat.
+              debugLogger.debug('Failed to truncate history to budget:', error);
+              newParts.unshift(part);
+              functionResponseTokenCounter += tokens;
+            }
+          } else {
+            // Within budget: keep the full response.
+            functionResponseTokenCounter += tokens;
+            newParts.unshift(part);
+          }
+        } else {
+          // Non-tool response part: always keep.
+          newParts.unshift(part);
+        }
+      }
+    }
+
+    // Reconstruct the message with processed (potentially truncated) parts.
+    truncatedHistory.unshift({ ...content, parts: newParts });
+  }
+
+  return truncatedHistory;
+}
 
 export class ChatCompressionService {
   async compress(
     chat: GeminiChat,
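To make the "Reverse Token Budget" concrete, here is a small, self-contained sketch of the same newest-first walk. The estimateTokens helper is a toy stand-in for estimateTokenCountSync, and unlike the real function this sketch does not count the truncated placeholder's own tokens toward the budget.

// Sketch: walk tool outputs newest-first; keep them whole until the budget
// is spent, then mark everything older for truncation.
const BUDGET = 50_000;
const estimateTokens = (s: string) => Math.ceil(s.length / 4); // rough heuristic

function planTruncation(outputs: string[]): boolean[] {
  // outputs[0] is oldest; true means "truncate this output".
  const truncate = new Array<boolean>(outputs.length).fill(false);
  let used = 0;
  for (let i = outputs.length - 1; i >= 0; i--) {
    const t = estimateTokens(outputs[i]);
    if (used + t > BUDGET) {
      truncate[i] = true; // older output over budget: keep only its tail
    } else {
      used += t; // recent output fits: preserve in full
    }
  }
  return truncate;
}

// Outputs of ~40k, ~30k, ~10k tokens: only the oldest exceeds the budget.
console.log(
  planTruncation(['x'.repeat(160_000), 'y'.repeat(120_000), 'z'.repeat(40_000)]),
); // -> [ true, false, false ]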
@@ -151,15 +282,22 @@
       }
     }
 
-    const splitPoint = findCompressSplitPoint(
+    // Apply token-based truncation to the entire history before splitting.
+    // This ensures that even the "to compress" portion is within safe limits for the summarization model.
+    const truncatedHistory = await truncateHistoryToBudget(
       curatedHistory,
+      config,
+    );
+
+    const splitPoint = findCompressSplitPoint(
+      truncatedHistory,
       1 - COMPRESSION_PRESERVE_THRESHOLD,
     );
 
-    const historyToCompress = curatedHistory.slice(0, splitPoint);
-    const historyToKeep = curatedHistory.slice(splitPoint);
+    const historyToCompressTruncated = truncatedHistory.slice(0, splitPoint);
+    const historyToKeepTruncated = truncatedHistory.slice(splitPoint);
 
-    if (historyToCompress.length === 0) {
+    if (historyToCompressTruncated.length === 0) {
       return {
         newHistory: null,
         info: {
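One property this hunk relies on: truncateHistoryToBudget rewrites parts but never drops or reorders messages, so a splitPoint computed on truncatedHistory indexes the same turns in the original curatedHistory (which the next hunk slices with the same index). A hypothetical guard for that invariant, assuming the Content type from @google/genai used in this file:

import type { Content } from '@google/genai';

// Hypothetical check, not part of the diff: truncation keeps one output
// message per input message, so indices stay aligned across both arrays.
function assertIndexAligned(original: Content[], truncated: Content[]): void {
  if (original.length !== truncated.length) {
    throw new Error('truncateHistoryToBudget must preserve message count');
  }
}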
@@ -170,10 +308,21 @@
       };
     }
 
+    // High Fidelity Decision: Should we send the original or truncated history to the summarizer?
+    const originalHistoryToCompress = curatedHistory.slice(0, splitPoint);
+    const originalToCompressTokenCount = estimateTokenCountSync(
+      originalHistoryToCompress.flatMap((c) => c.parts || []),
+    );
+
+    const historyForSummarizer =
+      originalToCompressTokenCount < tokenLimit(model)
+        ? originalHistoryToCompress
+        : historyToCompressTruncated;
+
     const summaryResponse = await config.getBaseLlmClient().generateContent({
       modelConfigKey: { model: modelStringToModelConfigAlias(model) },
       contents: [
-        ...historyToCompress,
+        ...historyForSummarizer,
         {
           role: 'user',
           parts: [
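The fidelity decision above can be read as a generic "prefer the full slice when it fits" rule. A standalone restatement, where the token counts stand in for the estimateTokenCountSync and tokenLimit helpers in the diff:

// Send the untruncated slice to the summarizer whenever it fits the
// model's window; otherwise fall back to the budget-truncated slice.
function pickHistoryForSummarizer<T>(
  original: T,
  truncated: T,
  originalTokens: number,
  modelWindowTokens: number,
): T {
  return originalTokens < modelWindowTokens ? original : truncated;
}

// e.g. a ~900k-token slice against a 1M-token window: full fidelity wins.
console.log(pickHistoryForSummarizer('original', 'truncated', 900_000, 1_048_576));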
@@ -199,7 +348,7 @@
         role: 'model',
         parts: [{ text: 'Got it. Thanks for the additional context!' }],
       },
-      ...historyToKeep,
+      ...historyToKeepTruncated,
     ];
 
     // Use a shared utility to construct the initial history for an accurate token count.
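After this final hunk, the reassembled history has roughly the shape below. This is a sketch: the initial context and the summary turn come from surrounding code not shown in this diff, and only the last element changes in this commit.

// Sketch of the post-compression history layout, assuming the summary is
// injected as a user turn followed by the model acknowledgement seen above.
const compressedHistoryShape = [
  '...initial context (getInitialChatHistory)',
  'user: summary of the compressed turns',
  'model: Got it. Thanks for the additional context!',
  '...historyToKeepTruncated (recent turns, tool outputs possibly truncated)',
];
console.log(compressedHistoryShape.join('\n'));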