Use ranged reads and limited searches and fuzzy editing improvements (#19240)

This commit is contained in:
Christian Gunderman
2026-02-17 23:54:08 +00:00
committed by GitHub
parent 55c628e967
commit ce84b3cb5f
9 changed files with 1174 additions and 145 deletions

278
evals/frugalReads.eval.ts Normal file
View File

@@ -0,0 +1,278 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { READ_FILE_TOOL_NAME, EDIT_TOOL_NAME } from '@google/gemini-cli-core';
describe('Frugal reads eval', () => {
  /**
   * Ensures that the agent is frugal in its use of context by relying
   * primarily on ranged reads when the line number is known, and combining
   * nearby ranges into a single contiguous read to save tool calls.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use ranged read when nearby lines are targeted',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      // Minimal flat ESLint config: only the `no-var` rule, so the fixture's
      // `var` declarations are the only reportable errors.
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // 1000-line fixture with exactly three lint errors clustered close
      // together (lines 500/510/520), so a single ranged read can cover them.
      'linter_mess.ts': (() => {
        const lines = [];
        for (let i = 0; i < 1000; i++) {
          if (i === 500 || i === 510 || i === 520) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in linter_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      // Check if the agent read the whole file
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      // Only reads that target the fixture file count toward frugality.
      const targetFileReads = readCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('linter_mess.ts');
      });
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // We expect 1-3 ranges in a single turn.
      expect(
        targetFileReads.length,
        'Agent should have used 1-3 ranged reads for near errors',
      ).toBeLessThanOrEqual(3);
      // All reads sharing the first read's prompt_id implies they were issued
      // within the same model turn (batched), not spread over several turns.
      const firstPromptId = targetFileReads[0].toolRequest.prompt_id;
      expect(firstPromptId, 'Prompt ID should be defined').toBeDefined();
      expect(
        targetFileReads.every(
          (call) => call.toolRequest.prompt_id === firstPromptId,
        ),
        'All reads should have happened in the same turn',
      ).toBe(true);
      // Accumulate every (offset, limit) range so we can verify both total
      // volume read and coverage of each error line below.
      let totalLinesRead = 0;
      const readRanges: { offset: number; limit: number }[] = [];
      for (const call of targetFileReads) {
        const args = JSON.parse(call.toolRequest.args);
        // A missing `limit` means an unbounded (whole-file) read.
        expect(
          args.limit,
          'Agent read the entire file (missing limit) instead of using ranged read',
        ).toBeDefined();
        const limit = args.limit;
        // offset defaults to the start of the file when omitted.
        const offset = args.offset ?? 0;
        totalLinesRead += limit;
        readRanges.push({ offset, limit });
        expect(args.limit, 'Agent read too many lines at once').toBeLessThan(
          1001,
        );
      }
      // Ranged read should be frugal and just enough to satisfy the task at hand.
      expect(
        totalLinesRead,
        'Agent read more of the file than expected',
      ).toBeLessThan(1000);
      // Check that we read around the error lines
      const errorLines = [500, 510, 520];
      for (const line of errorLines) {
        // A line is covered if it falls within any half-open [offset, offset+limit) range.
        const covered = readRanges.some(
          (range) => line >= range.offset && line < range.offset + range.limit,
        );
        expect(covered, `Agent should have read around line ${line}`).toBe(
          true,
        );
      }
      const editCalls = logs.filter(
        (log) => log.toolRequest?.name === EDIT_TOOL_NAME,
      );
      const targetEditCalls = editCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('linter_mess.ts');
      });
      // Three errors were planted, so at least three edits are expected.
      expect(
        targetEditCalls.length,
        'Agent should have made replacement calls on the target file',
      ).toBeGreaterThanOrEqual(3);
    },
  });
  /**
   * Ensures the agent uses multiple ranged reads when the targets are far
   * apart to avoid the need to read the whole file.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use ranged read when targets are far apart',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // 1000-line fixture with two errors placed far apart (lines 100 and
      // 900), so one contiguous ranged read cannot efficiently cover both.
      'far_mess.ts': (() => {
        const lines = [];
        for (let i = 0; i < 1000; i++) {
          if (i === 100 || i === 900) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in far_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      const targetFileReads = readCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('far_mess.ts');
      });
      // The agent should use ranged reads to be frugal with context tokens,
      // even if it requires multiple calls for far-apart errors.
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // We allow multiple calls since the errors are far apart.
      expect(
        targetFileReads.length,
        'Agent should have used separate reads for far apart errors',
      ).toBeLessThanOrEqual(4);
      // Every read must be bounded; unlike the first test, the exact ranges
      // are not checked here — only that no unbounded read occurred.
      for (const call of targetFileReads) {
        const args = JSON.parse(call.toolRequest.args);
        expect(
          args.limit,
          'Agent should have used ranged read (limit) to save tokens',
        ).toBeDefined();
      }
    },
  });
  /**
   * Validates that the agent reads the entire file if there are lots of matches
   * (e.g.: 10), as it's more efficient than many small ranged reads.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should read the entire file when there are many matches',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // 1000-line fixture with an error every 100 lines (10 in total),
      // scattered evenly — many small ranged reads would be wasteful here.
      'many_mess.ts': (() => {
        const lines = [];
        for (let i = 0; i < 1000; i++) {
          if (i % 100 === 0) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in many_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      const targetFileReads = readCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('many_mess.ts');
      });
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // In this case, we expect the agent to realize there are many scattered errors
      // and just read the whole file to be efficient with tool calls.
      const readEntireFile = targetFileReads.some((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.limit === undefined;
      });
      expect(
        readEntireFile,
        'Agent should have read the entire file because of the high number of scattered matches',
      ).toBe(true);
      // Check that the agent actually fixed the errors
      const editCalls = logs.filter(
        (log) => log.toolRequest?.name === EDIT_TOOL_NAME,
      );
      const targetEditCalls = editCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('many_mess.ts');
      });
      // At least one edit is enough here: a single replace_all-style edit
      // could plausibly fix every scattered `var` in one call.
      expect(
        targetEditCalls.length,
        'Agent should have made replacement calls on the target file',
      ).toBeGreaterThanOrEqual(1);
    },
  });
});

View File

@@ -9,7 +9,7 @@ import { evalTest } from './test-helper.js';
/**
* Evals to verify that the agent uses search tools efficiently (frugally)
* by utilizing limiting parameters like `total_max_matches` and `max_matches_per_file`.
* by utilizing limiting parameters like `limit` and `max_matches_per_file`.
* This ensures the agent doesn't flood the context window with unnecessary search results.
*/
describe('Frugal Search', () => {
@@ -25,120 +25,76 @@ describe('Frugal Search', () => {
return args;
};
/**
* Ensure that the agent makes use of either grep or ranged reads in fulfilling this task.
* The task is specifically phrased to not evoke "view" or "search" specifically because
* the model implicitly understands that such tasks are searches. This covers the case of
* an unexpectedly large file benefitting from frugal approaches to viewing, like grep, or
* ranged reads.
*/
evalTest('USUALLY_PASSES', {
name: 'should use targeted search with limit',
prompt: 'find me a sample usage of path.resolve() in the codebase',
name: 'should use grep or ranged read for large files',
prompt: 'What year was legacy_processor.ts written?',
files: {
'package.json': JSON.stringify({
name: 'test-project',
version: '1.0.0',
main: 'dist/index.js',
scripts: {
build: 'tsc',
test: 'vitest',
},
dependencies: {
typescript: '^5.0.0',
'@types/node': '^20.0.0',
vitest: '^1.0.0',
},
}),
'src/index.ts': `
import { App } from './app.ts';
const app = new App();
app.start();
`,
'src/app.ts': `
import * as path from 'path';
import { UserController } from './controllers/user.ts';
export class App {
constructor() {
console.log('App initialized');
}
public start(): void {
const userController = new UserController();
console.log('Static path:', path.resolve(__dirname, '../public'));
}
}
`,
'src/utils.ts': `
import * as path from 'path';
import * as fs from 'fs';
export function resolvePath(p: string): string {
return path.resolve(process.cwd(), p);
}
export function ensureDir(dirPath: string): void {
const absolutePath = path.resolve(dirPath);
if (!fs.existsSync(absolutePath)) {
fs.mkdirSync(absolutePath, { recursive: true });
}
}
`,
'src/config.ts': `
import * as path from 'path';
export const config = {
dbPath: path.resolve(process.cwd(), 'data/db.sqlite'),
logLevel: 'info',
};
`,
'src/controllers/user.ts': `
import * as path from 'path';
export class UserController {
public getUsers(): any[] {
console.log('Loading users from:', path.resolve('data/users.json'));
return [{ id: 1, name: 'Alice' }];
}
}
`,
'tests/app.test.ts': `
import { describe, it, expect } from 'vitest';
import * as path from 'path';
describe('App', () => {
it('should resolve paths', () => {
const p = path.resolve('test');
expect(p).toBeDefined();
});
});
`,
'src/utils.ts': 'export const add = (a, b) => a + b;',
'src/types.ts': 'export type ID = string;',
'src/legacy_processor.ts': [
'// Copyright 2005 Legacy Systems Inc.',
...Array.from(
{ length: 5000 },
(_, i) =>
`// Legacy code block ${i} - strictly preserved for backward compatibility`,
),
].join('\n'),
'README.md': '# Project documentation',
},
assert: async (rig) => {
const toolCalls = rig.readToolLogs();
const grepCalls = toolCalls.filter(
(call) => call.toolRequest.name === 'grep_search',
);
const getParams = (call: any) => {
let args = call.toolRequest.args;
if (typeof args === 'string') {
try {
args = JSON.parse(args);
} catch (e) {
// Ignore parse errors
}
}
return args;
};
expect(grepCalls.length).toBeGreaterThan(0);
// Check for wasteful full file reads
const fullReads = toolCalls.filter((call) => {
if (call.toolRequest.name !== 'read_file') return false;
const args = getParams(call);
return (
args.file_path === 'src/legacy_processor.ts' &&
(args.limit === undefined || args.limit === null)
);
});
const grepParams = grepCalls.map(getGrepParams);
const hasTotalMaxLimit = grepParams.some(
(p) => p.total_max_matches !== undefined && p.total_max_matches <= 100,
);
expect(
hasTotalMaxLimit,
`Expected agent to use a small total_max_matches (<= 100) for a sample usage request. Actual values: ${JSON.stringify(
grepParams.map((p) => p.total_max_matches),
)}`,
).toBe(true);
fullReads.length,
'Agent should not attempt to read the entire large file at once',
).toBe(0);
const hasMaxMatchesPerFileLimit = grepParams.some(
(p) =>
p.max_matches_per_file !== undefined && p.max_matches_per_file <= 5,
);
expect(
hasMaxMatchesPerFileLimit,
`Expected agent to use a small max_matches_per_file (<= 5) for a sample usage request. Actual values: ${JSON.stringify(
grepParams.map((p) => p.max_matches_per_file),
)}`,
).toBe(true);
// Check that it actually tried to find it using appropriate tools
const validAttempts = toolCalls.filter((call) => {
const args = getParams(call);
if (call.toolRequest.name === 'grep_search') {
return true;
}
if (
call.toolRequest.name === 'read_file' &&
args.file_path === 'src/legacy_processor.ts' &&
args.limit !== undefined
) {
return true;
}
return false;
});
expect(validAttempts.length).toBeGreaterThan(0);
},
});
});