Behavioral evals framework. (#16047)
commit 8030404b08 · parent 933bc5774f · committed by GitHub · +102
@@ -0,0 +1,102 @@
# Behavioral Evals

Behavioral evaluations (evals) are tests designed to validate the agent's
behavior in response to specific prompts. They serve as a critical feedback loop
for changes to system prompts, tool definitions, and other model-steering
mechanisms.

## Why Behavioral Evals?

Unlike traditional **integration tests**, which verify that the system functions
correctly (e.g., "does the file writer actually write to disk?"), behavioral
evals verify that the model _chooses_ to take the correct action (e.g., "does
the model decide to write to disk when asked to save code?").

They are also distinct from broad **industry benchmarks** (like SWE-bench).
While benchmarks measure general capabilities across complex challenges, our
behavioral evals focus on specific, granular behaviors relevant to the Gemini
CLI's features.

### Key Characteristics

- **Feedback loop**: They help us understand how changes to prompts or tools
  affect the model's decision-making.
  - _Did a change to the system prompt make the model less likely to use tool
    X?_
  - _Did a new tool definition confuse the model?_
- **Regression testing**: They prevent regressions in model steering.
- **Non-determinism**: Unlike unit tests, LLM behavior can be non-deterministic.
  We distinguish between behaviors that should be robust (`ALWAYS_PASSES`) and
  those that are generally reliable but might occasionally vary
  (`USUALLY_PASSES`).

## Creating an Evaluation

Evaluations are located in the `evals` directory. Each evaluation is a Vitest
test file that uses the `evalTest` function from `evals/test-helper.ts`.

### `evalTest`

The `evalTest` function is a helper that runs a single evaluation case. It takes
two arguments:

1. `policy`: The consistency expectation for this test (`'ALWAYS_PASSES'` or
   `'USUALLY_PASSES'`).
2. `evalCase`: An object defining the test case.

#### Policies

- `ALWAYS_PASSES`: Tests expected to pass 100% of the time. These are typically
  trivial and test basic functionality. They run in every CI run.
- `USUALLY_PASSES`: Tests expected to pass most of the time but that may be
  flaky due to non-deterministic behavior. These run nightly and are used to
  track the health of the product from build to build.

#### `EvalCase` Properties

- `name`: The name of the evaluation case.
- `prompt`: The prompt to send to the model.
- `params`: An optional object with parameters to pass to the test rig (e.g.,
  settings).
- `assert`: An async function that receives the test rig and the result of the
  run (the model's final output as a string) and asserts that the result is
  correct.
- `log`: An optional boolean that, if set to `true`, logs the tool calls to a
  file in the `evals/logs` directory.

### Example

```typescript
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';

describe('my_feature', () => {
  evalTest('ALWAYS_PASSES', {
    name: 'should do something',
    prompt: 'do it',
    assert: async (rig, result) => {
      // assertions
    },
  });
});
```

## Running Evaluations

### Always Passing Evals

To run the evaluations that are expected to always pass (CI safe):

```bash
npm run test:always_passing_evals
```

### All Evals

To run all evaluations, including those that may be flaky ("usually passes"):

```bash
npm run test:all_evals
```

This command sets the `RUN_EVALS` environment variable to `1`, which enables the
`USUALLY_PASSES` tests.
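If you want to drive the flaky set by hand rather than through the npm script, a minimal sketch of an equivalent invocation follows. It assumes the Vitest config shipped in this commit lives at `evals/vitest.config.ts`; that path is an assumption, not confirmed by the diff.

```bash
# Hypothetical manual invocation: setting RUN_EVALS enables the
# USUALLY_PASSES tests that evalTest would otherwise skip.
RUN_EVALS=1 npx vitest run --config evals/vitest.config.ts
```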
@@ -0,0 +1,31 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { validateModelOutput } from '../integration-tests/test-helper.js';

describe('save_memory', () => {
  evalTest('ALWAYS_PASSES', {
    name: 'should be able to save to memory',
    log: true,
    params: {
      // Restrict the rig to the save_memory tool so the model's choice is unambiguous.
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `remember that my favorite color is blue.

what is my favorite color? tell me that and surround it with $ symbol`,
    assert: async (rig, result) => {
      // The model should have decided to call the save_memory tool.
      const foundToolCall = await rig.waitForToolCall('save_memory');
      expect(
        foundToolCall,
        'Expected to find a save_memory tool call',
      ).toBeTruthy();

      // The model's answer should echo the remembered value.
      validateModelOutput(result, 'blue', 'Save memory test');
    },
  });
});
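Because this eval is declared `ALWAYS_PASSES`, it runs in every CI run and locally via `npm run test:always_passing_evals`; no `RUN_EVALS` environment variable is required.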
@@ -0,0 +1,70 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { it } from 'vitest';
import fs from 'node:fs';
import { TestRig } from '@google/gemini-cli-test-utils';

export * from '@google/gemini-cli-test-utils';

// Indicates the consistency expectation for this test.
// - ALWAYS_PASSES - The test is expected to pass 100% of the time. These tests
//   are typically trivial and test basic functionality with unambiguous
//   prompts. For example: "call save_memory to remember foo" should be fairly
//   reliable. They are the first line of defense against regressions in key
//   behaviors and run in every CI. You can run these locally with
//   'npm run test:always_passing_evals'.
//
// - USUALLY_PASSES - The test is expected to pass most of the time but may
//   have some flakiness as a result of relying on non-deterministic prompted
//   behaviors and/or ambiguous prompts or complex tasks.
//   For example: "Please do build changes until the very end" --> ambiguous
//   whether the agent should add to memory without more explicit system prompt
//   or user instructions. There are many more of these tests and they may pass
//   less consistently. The pass/fail trendline of this set of tests can be
//   used as a general measure of product quality. You can run these locally
//   with 'npm run test:all_evals'. This may take a really long time and is not
//   recommended.
export type EvalPolicy = 'ALWAYS_PASSES' | 'USUALLY_PASSES';

export function evalTest(policy: EvalPolicy, evalCase: EvalCase) {
  const fn = async () => {
    const rig = new TestRig();
    try {
      await rig.setup(evalCase.name, evalCase.params);
      const result = await rig.run({ args: evalCase.prompt });
      await evalCase.assert(rig, result);
    } finally {
      if (evalCase.log) {
        await logToFile(
          evalCase.name,
          JSON.stringify(rig.readToolLogs(), null, 2),
        );
      }
      await rig.cleanup();
    }
  };

  // USUALLY_PASSES tests are skipped unless RUN_EVALS is set
  // (e.g. by 'npm run test:all_evals').
  if (policy === 'USUALLY_PASSES' && !process.env.RUN_EVALS) {
    it.skip(evalCase.name, fn);
  } else {
    it(evalCase.name, fn);
  }
}

export interface EvalCase {
  name: string;
  params?: Record<string, any>;
  prompt: string;
  assert: (rig: TestRig, result: string) => Promise<void>;
  log?: boolean;
}

async function logToFile(name: string, content: string) {
  const logDir = 'evals/logs';
  await fs.promises.mkdir(logDir, { recursive: true });
  const sanitizedName = name.replace(/[^a-z0-9]/gi, '_').toLowerCase();
  const logFile = `${logDir}/${sanitizedName}.log`;
  await fs.promises.writeFile(logFile, content);
}
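The README's example covers `ALWAYS_PASSES`; as a complement, here is a minimal sketch of a `USUALLY_PASSES` eval built on the `evalTest` helper above. The `describe` label, name, and prompt are hypothetical, invented for illustration:

```typescript
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';

describe('memory_hints', () => {
  // Gated by evalTest: skipped unless RUN_EVALS is set
  // (e.g. via 'npm run test:all_evals').
  evalTest('USUALLY_PASSES', {
    name: 'should save an offhand preference to memory',
    // Deliberately ambiguous prompt: the model may reasonably decide
    // no tool call is needed, hence the weaker policy.
    prompt: 'I tend to prefer tabs over spaces, by the way.',
    assert: async (rig) => {
      const foundToolCall = await rig.waitForToolCall('save_memory');
      expect(foundToolCall, 'Expected a save_memory call').toBeTruthy();
    },
  });
});
```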
@@ -0,0 +1,15 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    testTimeout: 300000, // 5 minutes
    reporters: ['default'],
    include: ['**/*.eval.ts'],
  },
});
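Given the `include` pattern, eval files are only picked up when named with the `.eval.ts` suffix; the generous five-minute `testTimeout` presumably accommodates live model calls, which run far slower than ordinary unit tests.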