Ranged file reads.

This commit is contained in:
Christian Gunderman
2026-02-11 23:14:59 -08:00
parent b005b33ca7
commit 9d1220bbb0
2 changed files with 274 additions and 0 deletions

270
evals/frugalReads.eval.ts Normal file
View File

@@ -0,0 +1,270 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { READ_FILE_TOOL_NAME, EDIT_TOOL_NAME } from '@google/gemini-cli-core';
describe('Frugal reads eval', () => {
  /**
   * Ensures that the agent is frugal in its use of context by relying
   * primarily on ranged reads when the line number is known, and combining
   * nearby ranges into a single contiguous read to save tool calls.
   */
  evalTest('ALWAYS_PASSES', {
    name: 'should use ranged read when nearby lines are targeted',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // 1000-line file with three lint errors clustered near each other
      // (lines 500/510/520) so a single contiguous ranged read covers them.
      'linter_mess.ts': (() => {
        const lines: string[] = [];
        for (let i = 0; i < 1000; i++) {
          if (i === 500 || i === 510 || i === 520) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in linter_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      // Check if the agent read the whole file
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      const targetFileReads = readCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('linter_mess.ts');
      });
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // We expect a single contiguous range covering all errors since they are near each other.
      // Some models re-verify or read more than once, so we allow up to 4.
      expect(
        targetFileReads.length,
        'Agent should have been efficient with ranged reads for near errors',
      ).toBeLessThanOrEqual(4);
      let totalLinesRead = 0;
      const readRanges: { offset: number; limit: number }[] = [];
      for (const call of targetFileReads) {
        const args = JSON.parse(call.toolRequest.args);
        expect(
          args.limit,
          'Agent read the entire file (missing limit) instead of using ranged read',
        ).toBeDefined();
        const limit = args.limit;
        const offset = args.offset ?? 0;
        totalLinesRead += limit;
        readRanges.push({ offset, limit });
        expect(limit, 'Agent read too many lines at once').toBeLessThan(
          1001,
        );
      }
      // Ranged read should be frugal and just enough to satisfy the task at hand.
      expect(
        totalLinesRead,
        'Agent read more of the file than expected',
      ).toBeLessThan(1000);
      // Check that we read around the error lines
      const errorLines = [500, 510, 520];
      for (const line of errorLines) {
        const covered = readRanges.some(
          (range) => line >= range.offset && line < range.offset + range.limit,
        );
        expect(covered, `Agent should have read around line ${line}`).toBe(
          true,
        );
      }
      const editCalls = logs.filter(
        (log) => log.toolRequest?.name === EDIT_TOOL_NAME,
      );
      const targetEditCalls = editCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('linter_mess.ts');
      });
      expect(
        targetEditCalls.length,
        'Agent should have made replacement calls on the target file',
      ).toBeGreaterThanOrEqual(3);
    },
  });
/**
* Ensures the agent uses multiple ranged reads when the targets are far
* apart to avoid the need to read the whole file.
*/
evalTest('ALWAYS_PASSES', {
name: 'should use ranged read when targets are far apart',
files: {
'package.json': JSON.stringify({
name: 'test-project',
version: '1.0.0',
type: 'module',
}),
'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
'far_mess.ts': (() => {
const lines = [];
for (let i = 0; i < 1000; i++) {
if (i === 100 || i === 900) {
lines.push(`var oldVar${i} = "needs fix";`);
} else {
lines.push(`const goodVar${i} = "clean";`);
}
}
return lines.join('\n');
})(),
},
prompt:
'Fix all linter errors in far_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
assert: async (rig) => {
const logs = rig.readToolLogs();
const readCalls = logs.filter(
(log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
);
const targetFileReads = readCalls.filter((call) => {
const args = JSON.parse(call.toolRequest.args);
return args.file_path.includes('far_mess.ts');
});
// The agent should use ranged reads to be frugal with context tokens,
// even if it requires multiple calls for far-apart errors.
expect(
targetFileReads.length,
'Agent should have used read_file to check context',
).toBeGreaterThan(0);
// We allow multiple calls since the errors are far apart.
expect(
targetFileReads.length,
'Agent should have used separate reads for far apart errors',
).toBeLessThanOrEqual(4);
for (const call of targetFileReads) {
const args = JSON.parse(call.toolRequest.args);
expect(
args.limit,
'Agent should have used ranged read (limit) to save tokens',
).toBeDefined();
}
},
});
/**
* Validates that the agent reads the entire file if there are lots of matches
* (e.g.: 10), as it's more efficient than many small ranged reads.
*/
evalTest('ALWAYS_PASSES', {
name: 'should read the entire file when there are many matches',
files: {
'package.json': JSON.stringify({
name: 'test-project',
version: '1.0.0',
type: 'module',
}),
'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
'many_mess.ts': (() => {
const lines = [];
for (let i = 0; i < 1000; i++) {
if (i % 100 === 0) {
lines.push(`var oldVar${i} = "needs fix";`);
} else {
lines.push(`const goodVar${i} = "clean";`);
}
}
return lines.join('\n');
})(),
},
prompt:
'Fix all linter errors in many_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
assert: async (rig) => {
const logs = rig.readToolLogs();
const readCalls = logs.filter(
(log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
);
const targetFileReads = readCalls.filter((call) => {
const args = JSON.parse(call.toolRequest.args);
return args.file_path.includes('many_mess.ts');
});
expect(
targetFileReads.length,
'Agent should have used read_file to check context',
).toBeGreaterThan(0);
// In this case, we expect the agent to realize there are many scattered errors
// and just read the whole file to be efficient with tool calls.
const readEntireFile = targetFileReads.some((call) => {
const args = JSON.parse(call.toolRequest.args);
return args.limit === undefined;
});
expect(
readEntireFile,
'Agent should have read the entire file because of the high number of scattered matches',
).toBe(true);
// Check that the agent actually fixed the errors
const editCalls = logs.filter(
(log) => log.toolRequest?.name === EDIT_TOOL_NAME,
);
const targetEditCalls = editCalls.filter((call) => {
const args = JSON.parse(call.toolRequest.args);
return args.file_path.includes('many_mess.ts');
});
expect(
targetEditCalls.length,
'Agent should have made replacement calls on the target file',
).toBeGreaterThanOrEqual(1);
},
});
});

View File

@@ -168,6 +168,10 @@ export function renderCoreMandates(options?: CoreMandatesOptions): string {
- Always minimize wasted context window by aggressively scoping and limiting all of your ${GREP_TOOL_NAME} searches. e.g.: always pass total_max_matches, include, and max_matches_per_file.
- Use names_only=true or max_matches_per_file=1 to find a list of files that contain a pattern.
- Limit unnecessary context consumption from file reads by always using ${GREP_TOOL_NAME} (configured with \`max_matches_per_file\`) to search large files (> 1kb).
- Conserve context when reading files: read just enough to definitively answer the question, either by passing offset and limit to ${READ_FILE_TOOL_NAME}, or by searching with ${GREP_TOOL_NAME} using before=50, after=50, and total_max_matches.
- Always read at most one range from the file to avoid chatty "scrolling" or "pagination" style reads which waste tokens by adding extra turns.
- Always read at least 100 lines to avoid degrading the ${WRITE_FILE_TOOL_NAME} reliability.
- If you have multiple ranges that you want to read, always combine them into a single range to avoid wasting tokens on another tool call.
## Engineering Standards
- **Contextual Precedence:** Instructions found in ${formattedFilenames} files are foundational mandates. They take absolute precedence over the general workflows and tool defaults described in this system prompt.