From b416508ef1c64db6cdac2f185afa579368c353cf Mon Sep 17 00:00:00 2001
From: Sandy Tao
Date: Fri, 12 Sep 2025 15:11:41 -0700
Subject: [PATCH] feat(core): Cap shell output truncation threshold to the
 remaining context window size (#8379)

---
 packages/core/src/config/config.test.ts | 59 +++++++++++++++++++++++++
 packages/core/src/config/config.ts      |  9 +++-
 2 files changed, 67 insertions(+), 1 deletion(-)

diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts
index 3a0762094d..e63853286f 100644
--- a/packages/core/src/config/config.test.ts
+++ b/packages/core/src/config/config.test.ts
@@ -94,6 +94,9 @@ vi.mock('../telemetry/index.js', async (importOriginal) => {
   return {
     ...actual,
     initializeTelemetry: vi.fn(),
+    uiTelemetryService: {
+      getLastPromptTokenCount: vi.fn(),
+    },
   };
 });
 
@@ -123,8 +126,13 @@ vi.mock('../ide/ide-client.js', () => ({
 }));
 
 import { BaseLlmClient } from '../core/baseLlmClient.js';
+import { tokenLimit } from '../core/tokenLimits.js';
+import { uiTelemetryService } from '../telemetry/index.js';
 
 vi.mock('../core/baseLlmClient.js');
+vi.mock('../core/tokenLimits.js', () => ({
+  tokenLimit: vi.fn(),
+}));
 
 describe('Server Config (config.ts)', () => {
   const MODEL = 'gemini-pro';
@@ -661,6 +669,57 @@ describe('Server Config (config.ts)', () => {
       });
     });
   });
+
+  describe('getTruncateToolOutputThreshold', () => {
+    beforeEach(() => {
+      vi.clearAllMocks();
+    });
+
+    it('should return the calculated threshold when it is smaller than the default', () => {
+      const config = new Config(baseParams);
+      vi.mocked(tokenLimit).mockReturnValue(32000);
+      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
+        1000,
+      );
+      // 4 * (32000 - 1000) = 4 * 31000 = 124000
+      // default is 4_000_000
+      expect(config.getTruncateToolOutputThreshold()).toBe(124000);
+    });
+
+    it('should return the default threshold when the calculated value is larger', () => {
+      const config = new Config(baseParams);
+      vi.mocked(tokenLimit).mockReturnValue(2_000_000);
+      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
+        500_000,
+      );
+      // 4 * (2_000_000 - 500_000) = 4 * 1_500_000 = 6_000_000
+      // default is 4_000_000
+      expect(config.getTruncateToolOutputThreshold()).toBe(4_000_000);
+    });
+
+    it('should use a custom truncateToolOutputThreshold if provided', () => {
+      const customParams = {
+        ...baseParams,
+        truncateToolOutputThreshold: 50000,
+      };
+      const config = new Config(customParams);
+      vi.mocked(tokenLimit).mockReturnValue(8000);
+      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
+        2000,
+      );
+      // 4 * (8000 - 2000) = 4 * 6000 = 24000
+      // custom threshold is 50000
+      expect(config.getTruncateToolOutputThreshold()).toBe(24000);
+
+      vi.mocked(tokenLimit).mockReturnValue(32000);
+      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(
+        1000,
+      );
+      // 4 * (32000 - 1000) = 124000
+      // custom threshold is 50000
+      expect(config.getTruncateToolOutputThreshold()).toBe(50000);
+    });
+  });
 });
 
 describe('setApprovalMode with folder trust', () => {
diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts
index 19308ebfa2..1d88f51baf 100644
--- a/packages/core/src/config/config.ts
+++ b/packages/core/src/config/config.ts
@@ -39,7 +39,9 @@ import {
   initializeTelemetry,
   DEFAULT_TELEMETRY_TARGET,
   DEFAULT_OTLP_ENDPOINT,
+  uiTelemetryService,
 } from '../telemetry/index.js';
+import { tokenLimit } from '../core/tokenLimits.js';
 import { StartSessionEvent } from '../telemetry/index.js';
 import {
   DEFAULT_GEMINI_EMBEDDING_MODEL,
@@ -916,7 +918,12 @@ export class Config {
   }
 
   getTruncateToolOutputThreshold(): number {
-    return this.truncateToolOutputThreshold;
+    return Math.min(
+      // Estimate remaining context window in characters (1 token ~= 4 chars).
+      4 *
+        (tokenLimit(this.model) - uiTelemetryService.getLastPromptTokenCount()),
+      this.truncateToolOutputThreshold,
+    );
   }
 
   getTruncateToolOutputLines(): number {