feat(cli, core): Support hybrid evals.

This commit is contained in:
Your Name
2026-03-19 16:44:32 +00:00
parent 98d1bec99f
commit f178f9c020
20 changed files with 980 additions and 95 deletions

View File

@@ -13,7 +13,12 @@ import {
} from './test-helper.js';
import fs from 'node:fs';
import path from 'node:path';
import { DEFAULT_GEMINI_MODEL } from '@google/gemini-cli-core';
import {
DEFAULT_GEMINI_MODEL,
type ScriptItem,
extractFakeResponses,
extractUserPrompts,
} from '@google/gemini-cli-core';
export interface AppEvalCase {
name: string;
@@ -23,6 +28,12 @@ export interface AppEvalCase {
files?: Record<string, string>;
setup?: (rig: AppRig) => Promise<void>;
assert: (rig: AppRig, output: string) => Promise<void>;
/**
* Optional script to "prime the pump" before the main prompt.
* A sequential array interleaving MockUserTurn (e.g., userText('hello'))
* and FakeResponse (e.g., mockGenerateContentStreamText('hi')).
*/
script?: ScriptItem[];
}
/**
@@ -31,11 +42,25 @@ export interface AppEvalCase {
*/
export function appEvalTest(policy: EvalPolicy, evalCase: AppEvalCase) {
const fn = async () => {
const configOverrides = {
model: DEFAULT_GEMINI_MODEL,
...evalCase.configOverrides,
};
let userPrompts: string[] = [];
if (evalCase.script) {
configOverrides.fakeModelConfig = {
responses: extractFakeResponses(evalCase.script),
hybridHandoff: true,
};
// Extract the sequence of user prompts for the Mock User driver
userPrompts = extractUserPrompts(evalCase.script);
}
const rig = new AppRig({
configOverrides: {
model: DEFAULT_GEMINI_MODEL,
...evalCase.configOverrides,
},
configOverrides,
});
const { logDir, sanitizedName } = await prepareLogDir(evalCase.name);
@@ -62,11 +87,16 @@ export function appEvalTest(policy: EvalPolicy, evalCase: AppEvalCase) {
}
// Render the app!
rig.render();
await rig.render();
// Wait for initial ready state
await rig.waitForIdle();
// Execute priming script if requested
if (userPrompts.length > 0) {
await rig.driveMockUser(userPrompts, evalCase.timeout);
}
// Send the initial prompt
await rig.sendMessage(evalCase.prompt);

View File

@@ -0,0 +1,38 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { appEvalTest } from './app-test-helper.js';
import {
userText,
mockGenerateContentStreamText,
} from '@google/gemini-cli-core';
describe('Hybrid Handoff (Mock User to Live Model)', () => {
  // Scripted priming phase: alternating mock-user turns and canned model
  // responses that seed the conversation history before the live handoff.
  const primingScript = [
    userText('Start priming'),
    mockGenerateContentStreamText(
      "Hello! I am a fake response. Let's prime the pump.",
    ),
    userText('Continue priming'),
    mockGenerateContentStreamText(
      'Pump primed successfully. Ready for handoff.',
    ),
  ];

  appEvalTest('ALWAYS_PASSES', {
    name: 'Mock User successfully primes AppRig using a scripted history and hands off to live model',
    timeout: 120000,
    script: primingScript,
    prompt: 'What is 5 * 5? Please answer with just the final number.',
    assert: async (rig) => {
      // Let all pending tool/stream breakpoints settle before inspecting output.
      await rig.drainBreakpointsUntilIdle(undefined, 30000);
      const staticOutput = rig.getStaticOutput();
      // Ensure the handoff was successful
      expect(staticOutput).toContain('25');
    },
  });
});

View File

@@ -0,0 +1,78 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { AppRig } from '../test-utils/AppRig.js';
import {
FakeContentGenerator,
FallbackContentGenerator,
userText,
mockGenerateContentStreamText,
extractUserPrompts,
extractFakeResponses,
type ScriptItem,
} from '@google/gemini-cli-core';
describe('Hybrid Handoff (Mock User to Synthetic Live Model)', () => {
  it('successfully transitions from mock responses to live responses', async () => {
    // 1. The priming-phase script: mock user turns interleaved with canned
    // model responses.
    const script: ScriptItem[] = [
      userText('Start priming'),
      mockGenerateContentStreamText('Hello! I am a fake response.'),
      userText('Continue priming'),
      mockGenerateContentStreamText(
        'Pump primed successfully. Ready for handoff.',
      ),
    ];

    // 2. Primary generator replays the scripted model responses.
    const primary = new FakeContentGenerator(extractFakeResponses(script));

    // 3. A synthetic "live" generator stands in for the real model so the
    // test needs no API keys.
    const syntheticLive = new FakeContentGenerator([
      mockGenerateContentStreamText('The answer is 4.'),
    ]);
    // We need countTokens so AppRig doesn't hang checking size during truncation
    syntheticLive.countTokens = async () => ({ totalTokens: 10 });

    // 4. Chain primary -> synthetic-live so exhaustion triggers the handoff.
    const hybridGenerator = new FallbackContentGenerator(
      primary,
      syntheticLive,
    );

    // 5. Mount the AppRig with the composed generator injected directly.
    const rig = new AppRig({
      contentGenerator: hybridGenerator,
      configOverrides: {
        fakeResponses: [], // ensure it avoids disk IO attempts internally
      },
    });
    await rig.initialize();
    await rig.render();
    await rig.waitForIdle();

    // 6. Replay the scripted user turns through the Mock User driver.
    await rig.driveMockUser(extractUserPrompts(script), 10000);

    // 7. This prompt exhausts the primary generator and triggers the fallback.
    await rig.sendMessage('What is 2 + 2?');

    // 8. Wait for the fallback response to render.
    await rig.waitForOutput('The answer is 4.', 10000);
    const output = rig.getStaticOutput();
    expect(output).toContain('The answer is 4.');

    // Wait for everything to settle so React act() warnings don't fire during unmount
    await rig.drainBreakpointsUntilIdle(undefined, 10000);
    await rig.unmount();
  });
});

View File

@@ -8,7 +8,7 @@ import { describe, it, afterEach } from 'vitest';
import { AppRig } from '../test-utils/AppRig.js';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { PolicyDecision } from '@google/gemini-cli-core';
import { FakeContentGenerator, PolicyDecision } from '@google/gemini-cli-core';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -24,8 +24,10 @@ describe('Model Steering Integration', () => {
__dirname,
'../test-utils/fixtures/steering.responses',
);
const contentGenerator =
await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({
fakeResponsesPath,
contentGenerator,
configOverrides: { modelSteering: true },
});
await rig.initialize();

View File

@@ -8,7 +8,7 @@ import { describe, it, afterEach, expect } from 'vitest';
import { AppRig } from './AppRig.js';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { debugLogger } from '@google/gemini-cli-core';
import { FakeContentGenerator, debugLogger } from '@google/gemini-cli-core';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -25,8 +25,10 @@ describe('AppRig', () => {
'fixtures',
'steering.responses',
);
const contentGenerator =
await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({
fakeResponsesPath,
contentGenerator,
configOverrides: { modelSteering: true },
});
await rig.initialize();
@@ -66,7 +68,9 @@ describe('AppRig', () => {
'fixtures',
'simple.responses',
);
rig = new AppRig({ fakeResponsesPath });
const contentGenerator =
await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({ contentGenerator });
await rig.initialize();
await rig.render();
// Wait for initial render

View File

@@ -31,6 +31,8 @@ import {
debugLogger,
CoreToolCallStatus,
IntegrityDataStatus,
ConsecaSafetyChecker,
type ContentGenerator,
} from '@google/gemini-cli-core';
import {
type MockShellCommand,
@@ -48,37 +50,42 @@ import type {
TrackedCompletedToolCall,
TrackedToolCall,
} from '../ui/hooks/useToolScheduler.js';
import type { Content, GenerateContentParameters } from '@google/genai';
// Global state observer for React-based signals
const sessionStateMap = new Map<string, StreamingState>();
const activeRigs = new Map<string, AppRig>();
// Mock StreamingContext to report state changes back to the observer
vi.mock('../ui/contexts/StreamingContext.js', async (importOriginal) => {
// Mock useGeminiStream to report state changes back to the observer
vi.mock('../ui/hooks/useGeminiStream.js', async (importOriginal) => {
const original =
await importOriginal<typeof import('../ui/contexts/StreamingContext.js')>();
const { useConfig } = await import('../ui/contexts/ConfigContext.js');
await importOriginal<typeof import('../ui/hooks/useGeminiStream.js')>();
const React = await import('react');
const { useConfig } = await import('../ui/contexts/ConfigContext.js');
return {
...original,
useStreamingContext: () => {
const state = original.useStreamingContext();
// eslint-disable-next-line @typescript-eslint/no-explicit-any
useGeminiStream: (...args: any[]) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const result = (original.useGeminiStream as any)(...args);
const config = useConfig();
const sessionId = config.getSessionId();
React.useEffect(() => {
sessionStateMap.set(sessionId, state);
// If we see activity, we are no longer "awaiting" the start of a response
if (state !== StreamingState.Idle) {
const rig = activeRigs.get(sessionId);
if (rig) {
rig.awaitingResponse = false;
if (sessionId) {
sessionStateMap.set(sessionId, result.streamingState);
// If we see activity, we are no longer "awaiting" the start of a response
if (result.streamingState !== StreamingState.Idle) {
const rig = activeRigs.get(sessionId);
if (rig) {
rig.awaitingResponse = false;
}
}
}
}, [sessionId, state]);
}, [sessionId, result.streamingState]);
return state;
return result;
},
};
});
@@ -142,10 +149,10 @@ vi.mock('../ui/components/GeminiRespondingSpinner.js', async () => {
});
export interface AppRigOptions {
fakeResponsesPath?: string;
terminalWidth?: number;
terminalHeight?: number;
configOverrides?: Partial<ConfigParameters>;
contentGenerator?: ContentGenerator;
}
export interface PendingConfirmation {
@@ -160,6 +167,7 @@ export class AppRig {
private settings: LoadedSettings | undefined;
private testDir: string;
private sessionId: string;
private appRigId: string;
private pendingConfirmations = new Map<string, PendingConfirmation>();
private breakpointTools = new Set<string | undefined>();
@@ -169,12 +177,14 @@ export class AppRig {
* True if a message was just sent but React hasn't yet reported a non-idle state.
*/
awaitingResponse = false;
activeStreamCount = 0;
constructor(private options: AppRigOptions = {}) {
const uniqueId = randomUUID();
this.testDir = fs.mkdtempSync(
path.join(os.tmpdir(), `gemini-app-rig-${uniqueId.slice(0, 8)}-`),
);
this.appRigId = path.basename(this.testDir).toLowerCase();
this.sessionId = `test-session-${uniqueId}`;
activeRigs.set(this.sessionId, this);
}
@@ -197,7 +207,7 @@ export class AppRig {
cwd: this.testDir,
debugMode: false,
model: 'test-model',
fakeResponses: this.options.fakeResponsesPath,
contentGenerator: this.options.contentGenerator,
interactive: true,
approvalMode,
policyEngineConfig,
@@ -209,8 +219,44 @@ export class AppRig {
};
this.config = makeFakeConfig(configParams);
if (this.options.fakeResponsesPath) {
this.stubRefreshAuth();
// Track active streams directly from the client to prevent false idleness during synchronous mock yields
const client = this.config.getGeminiClient();
const originalStream = client.sendMessageStream.bind(client);
// eslint-disable-next-line @typescript-eslint/no-this-alias
const self = this;
client.sendMessageStream = async function* (
// eslint-disable-next-line @typescript-eslint/no-explicit-any
...args: any[]
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): AsyncGenerator<any, any, any> {
self.awaitingResponse = false;
self.activeStreamCount++;
try {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
yield* (originalStream as any)(...args);
} finally {
self.activeStreamCount = Math.max(0, self.activeStreamCount - 1);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any;
if (this.config.fakeResponses || this.options.contentGenerator) {
if (!this.options.contentGenerator && !this.config.fakeResponses) {
this.stubRefreshAuth();
}
if (!process.env['GEMINI_API_KEY']) {
vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
}
MockShellExecutionService.setPassthrough(false);
} else {
if (!process.env['GEMINI_API_KEY']) {
throw new Error(
'GEMINI_API_KEY must be set in the environment for live model tests.',
);
}
// For live tests, we allow falling through to the real shell service if no mock matches
MockShellExecutionService.setPassthrough(true);
}
this.setupMessageBusListeners();
@@ -226,18 +272,6 @@ export class AppRig {
private setupEnvironment() {
// Stub environment variables to avoid interference from developer's machine
vi.stubEnv('GEMINI_CLI_HOME', this.testDir);
if (this.options.fakeResponsesPath) {
vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
MockShellExecutionService.setPassthrough(false);
} else {
if (!process.env['GEMINI_API_KEY']) {
throw new Error(
'GEMINI_API_KEY must be set in the environment for live model tests.',
);
}
// For live tests, we allow falling through to the real shell service if no mock matches
MockShellExecutionService.setPassthrough(true);
}
vi.stubEnv('GEMINI_DEFAULT_AUTH_TYPE', AuthType.USE_GEMINI);
}
@@ -355,18 +389,23 @@ export class AppRig {
* Returns true if the agent is currently busy (responding or executing tools).
*/
isBusy(): boolean {
if (this.awaitingResponse) {
const reactState = sessionStateMap.get(this.sessionId);
if (reactState && reactState !== StreamingState.Idle) {
this.awaitingResponse = false;
}
if (this.awaitingResponse || this.activeStreamCount > 0) {
return true;
}
const reactState = sessionStateMap.get(this.sessionId);
// If we have a React-based state, use it as the definitive signal.
// 'responding' and 'waiting-for-confirmation' both count as busy for the overall task.
if (reactState !== undefined) {
return reactState !== StreamingState.Idle;
}
// Fallback to tool tracking if React hasn't reported yet
// Fallback to tool tracking
const isAnyToolActive = this.toolCalls.some((tc) => {
if (
tc.status === CoreToolCallStatus.Executing ||
@@ -657,6 +696,21 @@ export class AppRig {
}
}
/**
 * Acts as an automated user ('Mock User') that replays a fixed sequence of
 * prompts, waiting for the agent to go fully idle between turns. Used to
 * prime the conversation history before handing control to a live trial
 * or eval.
 *
 * @param prompts User messages, sent strictly in order.
 * @param timeout Per-turn idle timeout in milliseconds (default 60s).
 */
async driveMockUser(prompts: string[], timeout = 60000) {
  for (const prompt of prompts) {
    await this.sendMessage(prompt);
    // Block until all breakpoints/streams settle before the next turn.
    await this.drainBreakpointsUntilIdle(undefined, timeout);
  }
}
getConfig(): Config {
if (!this.config) throw new Error('AppRig not initialized');
return this.config;
@@ -754,6 +808,10 @@ export class AppRig {
// Forcefully clear IdeClient singleton promise
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(IdeClient as any).instancePromise = null;
// Reset Conseca singleton to avoid leaking config/state across tests
ConsecaSafetyChecker.resetInstance();
vi.clearAllMocks();
this.config = undefined;
@@ -770,4 +828,79 @@ export class AppRig {
}
}
}
/**
 * Returns every request the content generator has recorded so far, or an
 * empty array when the generator does not support request tracking.
 */
getSentRequests() {
  if (!this.config) throw new Error('AppRig not initialized');
  const generator = this.config.getContentGenerator();
  return generator.getSentRequests?.() ?? [];
}
/**
 * Returns the curated history (contents) carried by the most recent model
 * request, with unstable data (temp paths, ids) scrubbed so the result is
 * safe for deterministic golden comparisons.
 */
getLastSentRequestContents() {
  const sent = this.getSentRequests();
  if (sent.length === 0) return [];
  const { contents } = sent[sent.length - 1];
  return this.scrubUnstableData(contents || []);
}
/**
 * Returns the final curated history of the active chat session, scrubbed
 * of unstable run-specific data for deterministic comparisons.
 */
getCuratedHistory() {
  if (!this.config) throw new Error('AppRig not initialized');
  const chat = this.config.getGeminiClient().getChat();
  return this.scrubUnstableData(chat.getHistory(true));
}
/**
 * Deeply scrubs unstable, run-specific data (temp directory paths, rig and
 * session ids, tool-call ids, timestamped filenames, PGIDs) out of a
 * history/contents structure so golden comparisons are deterministic.
 *
 * The value is round-tripped through JSON, so the returned structure is a
 * deep copy and the caller's input is never mutated.
 */
private scrubUnstableData<
  T extends
    | Content[]
    | GenerateContentParameters['contents']
    | readonly Content[],
>(contents: T): T {
  // Escape regex metacharacters so literal values (temp paths may contain
  // characters like '\', '+', or '.') are matched verbatim instead of being
  // interpreted as regex syntax when fed to `new RegExp`.
  const escapeRegExp = (s: string) =>
    s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  // Deeply scrub unstable data
  const scrubbedString = JSON.stringify(contents)
    .replace(new RegExp(escapeRegExp(this.testDir), 'g'), '<TEST_DIR>')
    .replace(new RegExp(escapeRegExp(this.appRigId), 'g'), '<APP_RIG_ID>')
    .replace(new RegExp(escapeRegExp(this.sessionId), 'g'), '<SESSION_ID>')
    .replace(
      /([a-zA-Z0-9_]+)_([0-9]{13})_([0-9]+)\.txt/g,
      '$1_<TIMESTAMP>_<INDEX>.txt',
    )
    .replace(/Process Group PGID: \d+/g, 'Process Group PGID: <PGID>');
  const scrubbed = JSON.parse(scrubbedString) as T;
  if (Array.isArray(scrubbed) && scrubbed.length > 0) {
    const firstItem = scrubbed[0] as Content;
    // The leading session-context preamble varies per run; collapse it.
    if (firstItem.parts?.[0]?.text?.includes('<session_context>')) {
      firstItem.parts[0].text = '<SESSION_CONTEXT>';
    }
    for (const content of scrubbed as Content[]) {
      if (content.parts) {
        for (const part of content.parts) {
          if (part.functionCall) {
            part.functionCall.id = '<CALL_ID>';
          }
          if (part.functionResponse) {
            part.functionResponse.id = '<CALL_ID>';
            if (
              part.functionResponse.response !== null &&
              typeof part.functionResponse.response === 'object' &&
              'original_output_file' in part.functionResponse.response
            ) {
              part.functionResponse.response['original_output_file'] =
                '<TMP_FILE>';
            }
          }
        }
      }
    }
  }
  return scrubbed;
}
}

View File

@@ -262,5 +262,47 @@ describe('useReverseSearchCompletion', () => {
expect(result.current.suggestions).toEqual([]);
expect(result.current.showSuggestions).toBe(false);
});
it('does not cause state updates or timeouts when reverseSearchActive is false', async () => {
  const history = ['alpha', 'beta'];
  // Render the hook alongside a real text buffer so we can both type into
  // the buffer and observe the completion results it drives.
  const { result, rerender } = await renderHookWithProviders(
    ({ active }) => {
      const textBuffer = useTextBufferForTest('');
      return {
        search: useReverseSearchCompletion(textBuffer, history, active),
        buffer: textBuffer,
      };
    },
    { initialProps: { active: false } },
  );
  // Typing while inactive should NOT update debounced search state.
  // We change text to 'a', advance time past the debounce period (100ms),
  // and ensure suggestions remain empty because the debounced value never changed.
  act(() => {
    result.current.buffer.setText('a');
  });
  act(() => {
    // 200ms > the 100ms debounce window, so any scheduled update would fire.
    vi.advanceTimersByTime(200);
  });
  expect(result.current.search.suggestions).toEqual([]);
  // Activating reverse search and setting text to 'alpha' forces useDebouncedValue
  // to receive 'alpha', scheduling the timeout.
  act(() => {
    result.current.buffer.setText('alpha');
  });
  rerender({ active: true });
  act(() => {
    vi.advanceTimersByTime(200);
  });
  // With the hook active, the debounced query matches one history entry.
  expect(result.current.search.suggestions.map((s) => s.value)).toEqual([
    'alpha',
  ]);
});
});
});

View File

@@ -48,7 +48,10 @@ export function useReverseSearchCompletion(
setVisibleStartIndex,
} = useCompletion();
const debouncedQuery = useDebouncedValue(buffer.text, 100);
const debouncedQuery = useDebouncedValue(
reverseSearchActive ? buffer.text : '',
100,
);
// incremental search
const prevQueryRef = useRef<string>('');

View File

@@ -16,6 +16,7 @@ import {
type ContentGenerator,
type ContentGeneratorConfig,
} from '../core/contentGenerator.js';
import type { ScriptItem } from '../core/scriptUtils.js';
import type { OverageStrategy } from '../billing/billing.js';
import { PromptRegistry } from '../prompts/prompt-registry.js';
import { ResourceRegistry } from '../resources/resource-registry.js';
@@ -513,6 +514,11 @@ export interface PolicyUpdateConfirmationRequest {
newHash: string;
}
/**
 * Configuration for driving the model with scripted (fake) responses.
 */
export interface FakeModelConfig {
  /**
   * Either a path to a responses file (loaded via
   * `FakeContentGenerator.fromFile`) or an in-memory script whose fake
   * responses are extracted with `extractFakeResponses`.
   */
  responses: string | ScriptItem[];
  /**
   * When true, the fake generator is composed with the real generator via
   * `FallbackContentGenerator`, so exhausting the scripted responses hands
   * off to the live model instead of throwing.
   */
  hybridHandoff?: boolean;
}
export interface ConfigParameters {
sessionId: string;
clientName?: string;
@@ -537,6 +543,7 @@ export interface ConfigParameters {
mcpEnablementCallbacks?: McpEnablementCallbacks;
userMemory?: string | HierarchicalMemory;
geminiMdFileCount?: number;
contentGenerator?: ContentGenerator;
geminiMdFilePaths?: string[];
approvalMode?: ApprovalMode;
showMemoryUsage?: boolean;
@@ -608,7 +615,8 @@ export interface ConfigParameters {
maxAttempts?: number;
enableShellOutputEfficiency?: boolean;
shellToolInactivityTimeout?: number;
fakeResponses?: string;
fakeModelConfig?: FakeModelConfig;
fakeResponses?: string | ScriptItem[];
recordResponses?: string;
ptyInfo?: string;
disableYoloMode?: boolean;
@@ -814,7 +822,8 @@ export class Config implements McpContext, AgentLoopContext {
private readonly maxAttempts: number;
private readonly enableShellOutputEfficiency: boolean;
private readonly shellToolInactivityTimeout: number;
readonly fakeResponses?: string;
readonly fakeModelConfig?: FakeModelConfig;
private readonly hasCustomContentGenerator: boolean;
readonly recordResponses?: string;
private readonly disableYoloMode: boolean;
private readonly disableAlwaysAllow: boolean;
@@ -896,6 +905,10 @@ export class Config implements McpContext, AgentLoopContext {
this.pendingIncludeDirectories = params.includeDirectories ?? [];
this.debugMode = params.debugMode;
this.question = params.question;
this.hasCustomContentGenerator = !!params.contentGenerator;
if (params.contentGenerator) {
this.contentGenerator = params.contentGenerator;
}
this.coreTools = params.coreTools;
this.mainAgentTools = params.mainAgentTools;
@@ -1093,7 +1106,14 @@ export class Config implements McpContext, AgentLoopContext {
this.storage = new Storage(this.targetDir, this._sessionId);
this.storage.setCustomPlansDir(params.planSettings?.directory);
this.fakeResponses = params.fakeResponses;
if (params.fakeModelConfig) {
this.fakeModelConfig = params.fakeModelConfig;
} else if (params.fakeResponses) {
this.fakeModelConfig = {
responses: params.fakeResponses,
};
}
this.recordResponses = params.recordResponses;
this.fileExclusions = new FileExclusions(this);
this.eventEmitter = params.eventEmitter;
@@ -1198,6 +1218,10 @@ export class Config implements McpContext, AgentLoopContext {
this.modelRouterService = new ModelRouterService(this);
}
/**
 * Backwards-compatible accessor for the scripted responses. Returns the
 * `responses` field of `fakeModelConfig` (populated either directly or
 * from the legacy `fakeResponses` constructor parameter), or undefined
 * when no fake model is configured.
 */
get fakeResponses(): string | ScriptItem[] | undefined {
  return this.fakeModelConfig?.responses;
}
get config(): Config {
return this;
}
@@ -1359,11 +1383,13 @@ export class Config implements McpContext, AgentLoopContext {
baseUrl,
customHeaders,
);
this.contentGenerator = await createContentGenerator(
newContentGeneratorConfig,
this,
this.getSessionId(),
);
if (!this.hasCustomContentGenerator) {
this.contentGenerator = await createContentGenerator(
newContentGeneratorConfig,
this,
this.getSessionId(),
);
}
// Only assign to instance properties after successful initialization
this.contentGeneratorConfig = newContentGeneratorConfig;

View File

@@ -53,6 +53,7 @@ describe('createContentGenerator', () => {
);
const fakeResponsesFile = 'fake/responses.yaml';
const mockConfigWithFake = {
...mockConfig,
fakeResponses: fakeResponsesFile,
getClientName: vi.fn().mockReturnValue(undefined),
} as unknown as Config;
@@ -74,6 +75,7 @@ describe('createContentGenerator', () => {
const fakeResponsesFile = 'fake/responses.yaml';
const recordResponsesFile = 'record/responses.yaml';
const mockConfigWithRecordResponses = {
...mockConfig,
fakeResponses: fakeResponsesFile,
recordResponses: recordResponsesFile,
getClientName: vi.fn().mockReturnValue(undefined),

View File

@@ -22,9 +22,14 @@ import { LoggingContentGenerator } from './loggingContentGenerator.js';
import { InstallationManager } from '../utils/installationManager.js';
import { FakeContentGenerator } from './fakeContentGenerator.js';
import { parseCustomHeaders } from '../utils/customHeaderUtils.js';
import { extractFakeResponses } from './scriptUtils.js';
import { determineSurface } from '../utils/surface.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';
import { getVersion, resolveModel } from '../../index.js';
import {
FallbackContentGenerator,
getVersion,
resolveModel,
} from '../../index.js';
import type { LlmRole } from '../telemetry/llmRole.js';
/**
@@ -47,6 +52,8 @@ export interface ContentGenerator {
embedContent(request: EmbedContentParameters): Promise<EmbedContentResponse>;
getSentRequests?(): GenerateContentParameters[];
userTier?: UserTierId;
userTierName?: string;
@@ -166,11 +173,15 @@ export async function createContentGenerator(
sessionId?: string,
): Promise<ContentGenerator> {
const generator = await (async () => {
let fakeGenerator: FakeContentGenerator | undefined;
if (gcConfig.fakeResponses) {
const fakeGenerator = await FakeContentGenerator.fromFile(
gcConfig.fakeResponses,
);
return new LoggingContentGenerator(fakeGenerator, gcConfig);
fakeGenerator = Array.isArray(gcConfig.fakeResponses)
? new FakeContentGenerator(extractFakeResponses(gcConfig.fakeResponses))
: await FakeContentGenerator.fromFile(gcConfig.fakeResponses);
if (!gcConfig.fakeModelConfig?.hybridHandoff) {
return new LoggingContentGenerator(fakeGenerator, gcConfig);
}
}
const version = await getVersion();
const model = resolveModel(
@@ -208,23 +219,21 @@ export async function createContentGenerator(
) {
baseHeaders['Authorization'] = `Bearer ${config.apiKey}`;
}
let realGenerator: ContentGenerator;
if (
config.authType === AuthType.LOGIN_WITH_GOOGLE ||
config.authType === AuthType.COMPUTE_ADC
) {
const httpOptions = { headers: baseHeaders };
return new LoggingContentGenerator(
await createCodeAssistContentGenerator(
httpOptions,
config.authType,
gcConfig,
sessionId,
),
realGenerator = await createCodeAssistContentGenerator(
httpOptions,
config.authType,
gcConfig,
sessionId,
);
}
if (
} else if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI ||
config.authType === AuthType.GATEWAY
@@ -268,11 +277,21 @@ export async function createContentGenerator(
httpOptions,
...(apiVersionEnv && { apiVersion: apiVersionEnv }),
});
return new LoggingContentGenerator(googleGenAI.models, gcConfig);
realGenerator = googleGenAI.models;
} else {
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
}
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
if (fakeGenerator && gcConfig.fakeModelConfig?.hybridHandoff) {
realGenerator = new FallbackContentGenerator(
fakeGenerator,
realGenerator,
);
}
return new LoggingContentGenerator(realGenerator, gcConfig);
})();
if (gcConfig.recordResponses) {

View File

@@ -7,6 +7,7 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import {
FakeContentGenerator,
MockExhaustedError,
type FakeResponse,
} from './fakeContentGenerator.js';
import { promises } from 'node:fs';
@@ -142,7 +143,7 @@ describe('FakeContentGenerator', () => {
}
});
it('should throw error when no more responses', async () => {
it('should throw MockExhaustedError when no more responses', async () => {
const generator = new FakeContentGenerator([fakeGenerateContentResponse]);
await generator.generateContent(
{} as GenerateContentParameters,
@@ -151,24 +152,45 @@ describe('FakeContentGenerator', () => {
);
await expect(
generator.embedContent({} as EmbedContentParameters),
).rejects.toThrowError('No more mock responses for embedContent');
).rejects.toThrow(MockExhaustedError);
await expect(
generator.countTokens({} as CountTokensParameters),
).rejects.toThrowError('No more mock responses for countTokens');
).rejects.toThrow(MockExhaustedError);
await expect(
generator.generateContentStream(
{} as GenerateContentParameters,
'id',
LlmRole.MAIN,
),
).rejects.toThrow('No more mock responses for generateContentStream');
).rejects.toThrow(MockExhaustedError);
await expect(
generator.generateContent(
{} as GenerateContentParameters,
'id',
LlmRole.MAIN,
),
).rejects.toThrowError('No more mock responses for generateContent');
).rejects.toThrow(MockExhaustedError);
});
it('should track sent requests via getSentRequests', async () => {
const generator = new FakeContentGenerator([
fakeGenerateContentResponse,
fakeGenerateContentStreamResponse,
]);
const req1 = {
contents: [{ role: 'user', parts: [{ text: 'a' }] }],
} as GenerateContentParameters;
const req2 = {
contents: [{ role: 'user', parts: [{ text: 'b' }] }],
} as GenerateContentParameters;
await generator.generateContent(req1, 'id1', LlmRole.MAIN);
await generator.generateContentStream(req2, 'id2', LlmRole.MAIN);
const sent = generator.getSentRequests();
expect(sent).toHaveLength(2);
expect(sent[0]).toBe(req1);
expect(sent[1]).toBe(req2);
});
describe('fromFile', () => {

View File

@@ -18,6 +18,16 @@ import type { UserTierId, GeminiUserTier } from '../code_assist/types.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
import type { LlmRole } from '../telemetry/types.js';
/**
 * Thrown by FakeContentGenerator when a request arrives after every
 * scripted response has been consumed. Distinct from a plain Error so
 * fallback/hybrid generators can detect exhaustion and hand off to a
 * live generator instead of failing.
 */
export class MockExhaustedError extends Error {
  constructor(method: string, request?: unknown) {
    const serialized = safeJsonStringify(request);
    super(`No more mock responses for ${method}, got request:\n${serialized}`);
    this.name = 'MockExhaustedError';
  }
}
export type FakeResponse =
| {
method: 'generateContent';
@@ -42,12 +52,17 @@ export type FakeResponse =
// CLI argument.
export class FakeContentGenerator implements ContentGenerator {
private callCounter = 0;
private sentRequests: GenerateContentParameters[] = [];
userTier?: UserTierId;
userTierName?: string;
paidTier?: GeminiUserTier;
constructor(private readonly responses: FakeResponse[]) {}
getSentRequests(): GenerateContentParameters[] {
return this.sentRequests;
}
static async fromFile(filePath: string): Promise<FakeContentGenerator> {
const fileContent = await promises.readFile(filePath, 'utf-8');
const responses = fileContent
@@ -62,13 +77,14 @@ export class FakeContentGenerator implements ContentGenerator {
M extends FakeResponse['method'],
R = Extract<FakeResponse, { method: M }>['response'],
>(method: M, request: unknown): R {
const response = this.responses[this.callCounter++];
const response = this.responses[this.callCounter];
if (!response) {
throw new Error(
`No more mock responses for ${method}, got request:\n` +
safeJsonStringify(request),
);
throw new MockExhaustedError(method, request);
}
// We only increment the counter if we actually consume a mock response
this.callCounter++;
if (response.method !== method) {
throw new Error(
`Unexpected response type, next response was for ${response.method} but expected ${method}`,
@@ -81,24 +97,27 @@ export class FakeContentGenerator implements ContentGenerator {
async generateContent(
request: GenerateContentParameters,
_userPromptId: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
role: LlmRole,
_role: LlmRole,
): Promise<GenerateContentResponse> {
this.sentRequests.push(request);
const next = this.getNextResponse('generateContent', request);
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return Object.setPrototypeOf(
this.getNextResponse('generateContent', request),
GenerateContentResponse.prototype,
);
return Object.setPrototypeOf(next, GenerateContentResponse.prototype);
}
async generateContentStream(
request: GenerateContentParameters,
_userPromptId: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
role: LlmRole,
_role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
this.sentRequests.push(request);
const responses = this.getNextResponse('generateContentStream', request);
async function* stream() {
// Add a tiny delay to ensure React has time to render the 'Responding'
// state. If the mock stream finishes synchronously, AppRig's
// awaitingResponse flag may never be cleared, causing the rig to hang.
await new Promise((resolve) => setTimeout(resolve, 5));
for (const response of responses) {
yield Object.setPrototypeOf(
response,
@@ -112,16 +131,15 @@ export class FakeContentGenerator implements ContentGenerator {
async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
return this.getNextResponse('countTokens', request);
const next = this.getNextResponse('countTokens', request);
return next;
}
async embedContent(
request: EmbedContentParameters,
): Promise<EmbedContentResponse> {
const next = this.getNextResponse('embedContent', request);
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return Object.setPrototypeOf(
this.getNextResponse('embedContent', request),
EmbedContentResponse.prototype,
);
return Object.setPrototypeOf(next, EmbedContentResponse.prototype);
}
}

View File

@@ -0,0 +1,184 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi } from 'vitest';
import { FallbackContentGenerator } from './fallbackContentGenerator.js';
import { MockExhaustedError } from './fakeContentGenerator.js';
import type { ContentGenerator } from './contentGenerator.js';
import type { GenerateContentParameters } from '@google/genai';
import { LlmRole } from '../telemetry/types.js';
// Tests for FallbackContentGenerator: delegate every call to a primary
// ContentGenerator, reroute to a fallback only on MockExhaustedError, and
// proxy tier metadata from the primary.
describe('FallbackContentGenerator', () => {
  // Minimal well-formed request shared by all cases below.
  const dummyRequest: GenerateContentParameters = {
    model: 'gemini',
    contents: [{ role: 'user', parts: [{ text: 'hello' }] }],
  };
  // Happy path: the primary answers, so the fallback must stay untouched.
  it('delegates to the primary generator if successful', async () => {
    const mockPrimary = {
      generateContent: vi.fn().mockResolvedValue({ text: 'primary response' }),
    } as unknown as ContentGenerator;
    const mockFallback = {
      generateContent: vi.fn(),
    } as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    const result = await generator.generateContent(
      dummyRequest,
      'prompt-id',
      LlmRole.MAIN,
    );
    expect(result).toEqual({ text: 'primary response' });
    // Arguments must be forwarded verbatim to the primary.
    expect(mockPrimary.generateContent).toHaveBeenCalledWith(
      dummyRequest,
      'prompt-id',
      LlmRole.MAIN,
    );
    expect(mockFallback.generateContent).not.toHaveBeenCalled();
  });
  // Ordinary failures are NOT a fallback trigger: they propagate unchanged.
  it('bubbles up regular errors from the primary generator', async () => {
    const mockPrimary = {
      generateContent: vi.fn().mockRejectedValue(new Error('Network failure')),
    } as unknown as ContentGenerator;
    const mockFallback = {
      generateContent: vi.fn(),
    } as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    await expect(
      generator.generateContent(dummyRequest, 'prompt-id', LlmRole.MAIN),
    ).rejects.toThrow('Network failure');
    expect(mockFallback.generateContent).not.toHaveBeenCalled();
  });
  // MockExhaustedError is the one signal that reroutes the call to the
  // fallback and fires the optional onFallback callback with the method name.
  it('falls back to the secondary generator if primary throws MockExhaustedError', async () => {
    const mockPrimary = {
      generateContent: vi
        .fn()
        .mockRejectedValue(new MockExhaustedError('generateContent')),
    } as unknown as ContentGenerator;
    const mockFallback = {
      generateContent: vi.fn().mockResolvedValue({ text: 'fallback response' }),
    } as unknown as ContentGenerator;
    const onFallback = vi.fn();
    const generator = new FallbackContentGenerator(
      mockPrimary,
      mockFallback,
      onFallback,
    );
    const result = await generator.generateContent(
      dummyRequest,
      'prompt-id',
      LlmRole.MAIN,
    );
    expect(result).toEqual({ text: 'fallback response' });
    expect(mockPrimary.generateContent).toHaveBeenCalled();
    expect(onFallback).toHaveBeenCalledWith('generateContent');
    expect(mockFallback.generateContent).toHaveBeenCalledWith(
      dummyRequest,
      'prompt-id',
      LlmRole.MAIN,
    );
  });
  // If both generators exhaust, the error still surfaces to the caller.
  it('bubbles up MockExhaustedError if the fallback generator also exhausts', async () => {
    const mockPrimary = {
      generateContent: vi
        .fn()
        .mockRejectedValue(new MockExhaustedError('generateContent')),
    } as unknown as ContentGenerator;
    const mockFallback = {
      generateContent: vi
        .fn()
        .mockRejectedValue(new MockExhaustedError('generateContent')),
    } as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    await expect(
      generator.generateContent(dummyRequest, 'prompt-id', LlmRole.MAIN),
    ).rejects.toThrow(MockExhaustedError);
  });
  // Streaming path: the fallback's async generator is returned and iterable.
  it('handles stream delegation and fallback', async () => {
    const asyncStream = async function* () {
      yield { text: 'stream chunk' };
    };
    const mockPrimary = {
      generateContentStream: vi
        .fn()
        .mockRejectedValue(new MockExhaustedError('generateContentStream')),
    } as unknown as ContentGenerator;
    const mockFallback = {
      generateContentStream: vi.fn().mockResolvedValue(asyncStream()),
    } as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    const result = await generator.generateContentStream(
      dummyRequest,
      'prompt-id',
      LlmRole.MAIN,
    );
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const chunks: any[] = [];
    for await (const chunk of result) {
      chunks.push(chunk);
    }
    expect(chunks).toEqual([{ text: 'stream chunk' }]);
    expect(mockFallback.generateContentStream).toHaveBeenCalled();
  });
  // countTokens is optional on ContentGenerator: a primary that lacks it
  // behaves like an exhausted mock and defers to the fallback.
  it('handles optional methods like countTokens that are missing on primary', async () => {
    const mockPrimary = {} as unknown as ContentGenerator;
    const mockFallback = {
      countTokens: vi.fn().mockResolvedValue({ totalTokens: 42 }),
    } as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    const result = await generator.countTokens({
      model: 'gemini',
      contents: [],
    });
    expect(result).toEqual({ totalTokens: 42 });
    expect(mockFallback.countTokens).toHaveBeenCalled();
  });
  // Same contract for the optional embedContent method.
  it('handles optional methods like embedContent that are missing on primary', async () => {
    const mockPrimary = {} as unknown as ContentGenerator;
    const mockFallback = {
      embedContent: vi.fn().mockResolvedValue({ embedding: { values: [0.1] } }),
    } as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    const result = await generator.embedContent({
      model: 'gemini',
      contents: { parts: [{ text: '' }] },
    });
    expect(result).toEqual({ embedding: { values: [0.1] } });
    expect(mockFallback.embedContent).toHaveBeenCalled();
  });
  // Tier getters (userTier / userTierName / paidTier) mirror the primary.
  it('proxies tier properties from the primary', () => {
    const mockPrimary = {
      userTier: 'test-tier',
      userTierName: 'Test Tier',
      paidTier: true,
    } as unknown as ContentGenerator;
    const mockFallback = {} as unknown as ContentGenerator;
    const generator = new FallbackContentGenerator(mockPrimary, mockFallback);
    expect(generator.userTier).toBe('test-tier');
    expect(generator.userTierName).toBe('Test Tier');
    expect(generator.paidTier).toBe(true);
  });
});

View File

@@ -0,0 +1,109 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { ContentGenerator } from './contentGenerator.js';
import type {
GenerateContentParameters,
GenerateContentResponse,
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
} from '@google/genai';
import type { LlmRole } from '../telemetry/types.js';
import { MockExhaustedError } from './fakeContentGenerator.js';
/**
 * Chains two ContentGenerators: every call is attempted on `primary` first,
 * and only a MockExhaustedError (a scripted mock running out of responses)
 * reroutes the call to `fallback`. Any other error propagates unchanged.
 */
export class FallbackContentGenerator implements ContentGenerator {
  constructor(
    private readonly primary: ContentGenerator,
    private readonly fallback: ContentGenerator,
    /** Invoked with the method name each time a call is rerouted to the fallback. */
    private readonly onFallback?: (method: string) => void,
  ) {}

  // Tier metadata is always proxied from the primary generator.
  get userTier() {
    return this.primary.userTier;
  }
  get userTierName() {
    return this.primary.userTierName;
  }
  get paidTier() {
    return this.primary.paidTier;
  }

  async generateContent(
    request: GenerateContentParameters,
    userPromptId: string,
    role: LlmRole,
  ): Promise<GenerateContentResponse> {
    try {
      return await this.primary.generateContent(request, userPromptId, role);
    } catch (err) {
      if (!(err instanceof MockExhaustedError)) {
        throw err;
      }
      this.onFallback?.('generateContent');
      return this.fallback.generateContent(request, userPromptId, role);
    }
  }

  async generateContentStream(
    request: GenerateContentParameters,
    userPromptId: string,
    role: LlmRole,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    try {
      // Awaiting here ensures a rejection from the primary is caught in this
      // frame instead of surfacing to the caller.
      return await this.primary.generateContentStream(
        request,
        userPromptId,
        role,
      );
    } catch (err) {
      if (!(err instanceof MockExhaustedError)) {
        throw err;
      }
      this.onFallback?.('generateContentStream');
      return this.fallback.generateContentStream(request, userPromptId, role);
    }
  }

  async countTokens(
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    try {
      // A primary without countTokens is treated exactly like an exhausted mock.
      if (!this.primary.countTokens) {
        throw new MockExhaustedError('countTokens');
      }
      return await this.primary.countTokens(request);
    } catch (err) {
      // Fall back only when the fallback actually implements the method.
      if (err instanceof MockExhaustedError && this.fallback.countTokens) {
        this.onFallback?.('countTokens');
        return this.fallback.countTokens(request);
      }
      throw err;
    }
  }

  async embedContent(
    request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    try {
      // A primary without embedContent is treated exactly like an exhausted mock.
      if (!this.primary.embedContent) {
        throw new MockExhaustedError('embedContent');
      }
      return await this.primary.embedContent(request);
    } catch (err) {
      if (err instanceof MockExhaustedError && this.fallback.embedContent) {
        this.onFallback?.('embedContent');
        return this.fallback.embedContent(request);
      }
      throw err;
    }
  }
}

View File

@@ -168,6 +168,10 @@ export class LoggingContentGenerator implements ContentGenerator {
return this.wrapped.paidTier;
}
/** Surfaces the wrapped generator's recorded requests (empty if it records none). */
getSentRequests?(): GenerateContentParameters[] {
  const recorded = this.wrapped.getSentRequests?.();
  return recorded ?? [];
}
private logApiRequest(
contents: Content[],
model: string,

View File

@@ -39,6 +39,10 @@ export class RecordingContentGenerator implements ContentGenerator {
return this.realGenerator.userTierName;
}
/** Delegates request-log access to the underlying real generator (empty if absent). */
getSentRequests?(): GenerateContentParameters[] {
  const recorded = this.realGenerator.getSentRequests?.();
  return recorded ?? [];
}
async generateContent(
request: GenerateContentParameters,
userPromptId: string,

View File

@@ -0,0 +1,104 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import {
mockGenerateContentStreamText,
mockGenerateContentText,
userText,
isFakeResponse,
isFakeRequest,
extractUserPrompts,
extractFakeResponses,
type ScriptItem,
} from './scriptUtils.js';
// Tests for the hybrid-eval script helpers: builders that create scripted
// model responses / user turns, plus the type guards and extractors that
// partition a mixed script into prompts and canned responses.
describe('scriptUtils', () => {
  describe('mockGenerateContentStreamText', () => {
    it('creates a valid FakeResponse for generateContentStream', () => {
      const result = mockGenerateContentStreamText('hello stream');
      expect(result.method).toBe('generateContentStream');
      // Streaming responses are arrays of chunks.
      expect(Array.isArray(result.response)).toBe(true);
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const responseArray = result.response as any[];
      expect(responseArray[0].candidates[0].content.parts[0].text).toBe(
        'hello stream',
      );
      expect(responseArray[0].candidates[0].finishReason).toBe('STOP');
    });
  });
  describe('mockGenerateContentText', () => {
    it('creates a valid FakeResponse for generateContent', () => {
      const result = mockGenerateContentText('hello block');
      expect(result.method).toBe('generateContent');
      // Non-streaming responses are a single object, not an array.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const responseObj = result.response as any;
      expect(responseObj.candidates[0].content.parts[0].text).toBe(
        'hello block',
      );
      expect(responseObj.candidates[0].finishReason).toBe('STOP');
    });
  });
  describe('userText', () => {
    it('creates a valid FakeRequest', () => {
      const result = userText('user input');
      expect(result.method).toBe('userText');
      expect(result.text).toBe('user input');
    });
  });
  describe('Type Guards', () => {
    // isFakeResponse / isFakeRequest must be exact complements of each other.
    it('correctly identifies FakeResponse vs FakeRequest', () => {
      const fakeRes = mockGenerateContentText('test');
      const fakeReq = userText('test');
      expect(isFakeResponse(fakeRes)).toBe(true);
      expect(isFakeResponse(fakeReq)).toBe(false);
      expect(isFakeRequest(fakeReq)).toBe(true);
      expect(isFakeRequest(fakeRes)).toBe(false);
    });
  });
  describe('extractUserPrompts and extractFakeResponses', () => {
    it('correctly partitions a mixed script array', () => {
      const script: ScriptItem[] = [
        userText('prompt 1'),
        mockGenerateContentText('response 1'),
        userText('prompt 2'),
        mockGenerateContentStreamText('response 2'),
      ];
      const prompts = extractUserPrompts(script);
      // Relative order within each partition must be preserved.
      expect(prompts).toEqual(['prompt 1', 'prompt 2']);
      const responses = extractFakeResponses(script);
      expect(responses).toHaveLength(2);
      expect(responses[0].method).toBe('generateContent');
      expect(responses[1].method).toBe('generateContentStream');
    });
    it('handles empty scripts', () => {
      expect(extractUserPrompts([])).toEqual([]);
      expect(extractFakeResponses([])).toEqual([]);
    });
    it('handles scripts with only one type', () => {
      const justPrompts = [userText('a'), userText('b')];
      expect(extractUserPrompts(justPrompts)).toEqual(['a', 'b']);
      expect(extractFakeResponses(justPrompts)).toEqual([]);
      const justResponses = [
        mockGenerateContentText('a'),
        mockGenerateContentText('b'),
      ];
      expect(extractUserPrompts(justResponses)).toEqual([]);
      expect(extractFakeResponses(justResponses)).toHaveLength(2);
    });
  });
});

View File

@@ -0,0 +1,59 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { GenerateContentResponse } from '@google/genai';
import type { FakeResponse } from './fakeContentGenerator.js';
/** A scripted user turn: `text` is submitted as the next user prompt by the mock-user driver. */
export type FakeRequest = { method: 'userText'; text: string };
/** One entry of a priming script: either a canned model response (FakeResponse) or a user turn (FakeRequest). */
export type ScriptItem = FakeResponse | FakeRequest;
/**
 * Builds a scripted streaming reply: a single chunk carrying `text`, marked
 * with finishReason STOP so consumers treat the stream as complete.
 */
export function mockGenerateContentStreamText(text: string): FakeResponse {
  // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion
  const chunks = [
    {
      candidates: [{ content: { parts: [{ text }] }, finishReason: 'STOP' }],
    },
  ] as GenerateContentResponse[];
  return { method: 'generateContentStream', response: chunks };
}
/**
 * Builds a scripted non-streaming reply: one response object containing
 * `text` with finishReason STOP.
 */
export function mockGenerateContentText(text: string): FakeResponse {
  // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion
  const body = {
    candidates: [{ content: { parts: [{ text }] }, finishReason: 'STOP' }],
  } as GenerateContentResponse;
  return { method: 'generateContent', response: body };
}
/** Wraps `text` as a scripted mock-user turn. */
export function userText(text: string): FakeRequest {
  return {
    method: 'userText',
    text,
  };
}
/** Type guard: true for any script item that is NOT a user turn. */
export function isFakeResponse(item: ScriptItem): item is FakeResponse {
  const isUserTurn = item.method === 'userText';
  return !isUserTurn;
}
/** Type guard: true only for scripted user turns. */
export function isFakeRequest(item: ScriptItem): item is FakeRequest {
  switch (item.method) {
    case 'userText':
      return true;
    default:
      return false;
  }
}
/**
* Extracts all FakeRequests from a script array and maps them to their string text.
*/
export function extractUserPrompts(script: ScriptItem[]): string[] {
return script.filter(isFakeRequest).map((req) => req.text);
}
/**
 * Collects every canned model response in `script` (everything that is not a
 * user turn), preserving order.
 */
export function extractFakeResponses(script: ScriptItem[]): FakeResponse[] {
  return script.filter(
    (item): item is FakeResponse => item.method !== 'userText',
  );
}

View File

@@ -24,6 +24,7 @@ export * from './config/extensions/integrityTypes.js';
export * from './billing/index.js';
export * from './confirmation-bus/types.js';
export * from './confirmation-bus/message-bus.js';
export * from './safety/conseca/conseca.js';
// Export Commands logic
export * from './commands/extensions.js';
@@ -36,6 +37,9 @@ export * from './commands/types.js';
export * from './core/baseLlmClient.js';
export * from './core/client.js';
export * from './core/contentGenerator.js';
export * from './core/fakeContentGenerator.js';
export * from './core/fallbackContentGenerator.js';
export * from './core/scriptUtils.js';
export * from './core/loggingContentGenerator.js';
export * from './core/geminiChat.js';
export * from './core/logger.js';