This commit is contained in:
Your Name
2026-03-11 18:58:50 +00:00
parent 23575ff83d
commit 3291d3a58b
15 changed files with 542 additions and 117 deletions

View File

@@ -13,7 +13,12 @@ import {
} from './test-helper.js';
import fs from 'node:fs';
import path from 'node:path';
import { DEFAULT_GEMINI_MODEL } from '@google/gemini-cli-core';
import {
DEFAULT_GEMINI_MODEL,
type ScriptItem,
extractFakeResponses,
extractUserPrompts,
} from '@google/gemini-cli-core';
export interface AppEvalCase {
name: string;
@@ -23,6 +28,12 @@ export interface AppEvalCase {
files?: Record<string, string>;
setup?: (rig: AppRig) => Promise<void>;
assert: (rig: AppRig, output: string) => Promise<void>;
/**
* Optional script to "prime the pump" before the main prompt.
* A sequential array interleaving FakeRequest user turns (e.g., userText('hello'))
* and FakeResponse (e.g., mockGenerateContentStreamText('hi')).
*/
script?: ScriptItem[];
}
/**
@@ -31,11 +42,23 @@ export interface AppEvalCase {
*/
export function appEvalTest(policy: EvalPolicy, evalCase: AppEvalCase) {
const fn = async () => {
const configOverrides = {
model: DEFAULT_GEMINI_MODEL,
...evalCase.configOverrides,
};
let userPrompts: string[] = [];
if (evalCase.script) {
// Extract fake model responses to inject into the ContentGenerator
configOverrides.fakeResponses = extractFakeResponses(evalCase.script);
// Extract the sequence of user prompts for the Mock User driver
userPrompts = extractUserPrompts(evalCase.script);
}
const rig = new AppRig({
configOverrides: {
model: DEFAULT_GEMINI_MODEL,
...evalCase.configOverrides,
},
configOverrides,
});
const { logDir, sanitizedName } = await prepareLogDir(evalCase.name);
@@ -67,6 +90,11 @@ export function appEvalTest(policy: EvalPolicy, evalCase: AppEvalCase) {
// Wait for initial ready state
await rig.waitForIdle();
// Execute priming script if requested
if (userPrompts.length > 0) {
await rig.driveMockUser(userPrompts, evalCase.timeout);
}
// Send the initial prompt
await rig.sendMessage(evalCase.prompt);

View File

@@ -8,11 +8,15 @@ import { describe, expect } from 'vitest';
import path from 'node:path';
import fs from 'node:fs';
import { appEvalTest } from './app-test-helper.js';
import {
userText,
mockGenerateContentStreamText,
} from '@google/gemini-cli-core';
describe('Auto-Distillation Behavioral Evals', () => {
appEvalTest('ALWAYS_PASSES', {
appEvalTest('USUALLY_PASSES', {
name: 'Agent successfully navigates truncated output using the structural map to extract a secret',
timeout: 120000,
timeout: 180000,
configOverrides: {},
setup: async (rig) => {
const testDir = rig.getTestDir();
@@ -23,8 +27,6 @@ describe('Auto-Distillation Behavioral Evals', () => {
uptime: 999999,
environment: 'production',
},
// Pad with enough active sessions to push the next section past the 8,000 character 'head'
// 300 sessions * ~80 chars = ~24,000 characters
active_sessions: [],
quarantined_payloads: [
{ id: 'Subject-01', status: 'cleared' },
@@ -35,8 +37,6 @@ describe('Auto-Distillation Behavioral Evals', () => {
},
{ id: 'Subject-99', status: 'cleared' },
],
// Pad with enough metrics to push the total file size well past 60,000 characters
// 2000 metrics * ~70 chars = ~140,000 characters
archived_metrics: [],
};
@@ -56,29 +56,26 @@ describe('Auto-Distillation Behavioral Evals', () => {
});
}
const massiveString = JSON.stringify(mockData, null, 2);
fs.writeFileSync(
path.join(testDir, 'server_state_dump.json'),
JSON.stringify(mockData, null, 2),
massiveString,
);
},
prompt:
'A massive log dump is located at server_state_dump.json. First, you MUST run the shell command `cat server_state_dump.json` to view it. The output will likely be truncated. Read the structural map provided in the output, and then figure out a way to extract the secret_token for the quarantined payload "Subject-89".',
script: [
userText('We have a critical error in production. Are you ready to help?'),
mockGenerateContentStreamText(
'I am ready. Please provide the details of the error.',
),
],
prompt: `My application crashed with: "FATAL: Subject-89 held for review in quarantine". \n\nPlease run \`cat server_state_dump.json\` to investigate. The file is massive, so your tool output will be automatically truncated and you will receive a structural map instead. Use that structural map to determine the right command to extract the \`secret_token\` for Subject-89. Please state the exact secret token when you find it.`,
assert: async (rig) => {
await rig.waitForIdle(120000);
const finalOutput = rig.getStaticOutput();
const curatedHistory = rig.getCuratedHistory();
// Ensure truncation occurred
const stringifiedHistory = JSON.stringify(curatedHistory);
expect(stringifiedHistory).toContain('Output too large. Showing first');
// Ensure the structural map summarizer was triggered
expect(stringifiedHistory).toContain(
'--- Structural Map of Truncated Content ---',
);
// Ensure the agent correctly extracted the secret token
// Ensure the agent correctly extracted the secret token after navigating the distilled output
expect(finalOutput).toContain('the_cake_is_a_lie');
},
});

View File

@@ -0,0 +1,39 @@
import { describe, expect } from 'vitest';
import {
appEvalTest,
} from './app-test-helper.js';
import {
userText,
mockGenerateContentStreamText,
} from '@google/gemini-cli-core';
describe('Hybrid Handoff (Mock User to Live Model)', () => {
  // Two scripted user/model exchanges that the Mock User replays before the
  // live prompt below is sent.
  const primingScript = [
    userText('Start priming'),
    mockGenerateContentStreamText(
      "Hello! I am a fake response. Let's prime the pump.",
    ),
    userText('Continue priming'),
    mockGenerateContentStreamText(
      'Pump primed successfully. Ready for handoff.',
    ),
  ];

  appEvalTest('ALWAYS_PASSES', {
    name: 'Mock User successfully primes AppRig using a scripted history and hands off to live model',
    timeout: 120000,
    script: primingScript,
    prompt: 'What is 2 + 2? Please answer with exactly the number "4".',
    assert: async (rig) => {
      // By the time assert runs, the Mock User has already driven the script,
      // so the history holds the two fake turns and the final prompt above
      // was just sent to the LIVE model.
      await rig.waitForIdle(60000);
      const output = rig.getStaticOutput();
      // A successful handoff means the live model answered the arithmetic.
      expect(output).toContain('4');
      // Settle remaining breakpoints so teardown is clean.
      await rig.drainBreakpointsUntilIdle(undefined, 10000);
    },
  });
});

View File

@@ -4,10 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, afterEach, vi } from 'vitest';
import { describe, it, expect, vi, afterEach } from 'vitest';
import { AppRig } from '../test-utils/AppRig.js';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { FakeContentGenerator } from '@google/gemini-cli-core';
import { PolicyDecision } from '@google/gemini-cli-core';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -27,8 +28,9 @@ describe('Auto-distillation Integration', () => {
__dirname,
'../test-utils/fixtures/auto-distillation.responses',
);
const contentGenerator = await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({
fakeResponsesPath,
contentGenerator,
});
await rig.initialize();

View File

@@ -0,0 +1,64 @@
import { describe, it, expect } from 'vitest';
import { AppRig } from '../test-utils/AppRig.js';
import {
FakeContentGenerator,
FallbackContentGenerator,
userText,
mockGenerateContentStreamText,
extractUserPrompts,
extractFakeResponses,
type ScriptItem
} from '@google/gemini-cli-core';
describe('Hybrid Handoff (Mock User to Synthetic Live Model)', () => {
  it('successfully transitions from mock responses to live responses', async () => {
    // The priming conversation: two user turns, each answered by a canned reply.
    const script: ScriptItem[] = [
      userText('Start priming'),
      mockGenerateContentStreamText('Hello! I am a fake response.'),
      userText('Continue priming'),
      mockGenerateContentStreamText('Pump primed successfully. Ready for handoff.'),
    ];

    // Primary generator serves only the canned replies from the script.
    const primary = new FakeContentGenerator(extractFakeResponses(script));

    // Stand-in for the "live" model — synthetic, so no API keys are needed.
    const liveStandIn = new FakeContentGenerator([
      mockGenerateContentStreamText('The answer is 4.'),
    ]);
    // We need countTokens so AppRig doesn't hang checking size during truncation.
    liveStandIn.countTokens = async () => ({ totalTokens: 10 });

    // Once the primary is exhausted, calls fall through to the stand-in.
    const generator = new FallbackContentGenerator(primary, liveStandIn);

    // Mount the AppRig with the composed generator injected directly.
    const rig = new AppRig({
      contentGenerator: generator,
      configOverrides: {
        fakeResponses: [], // ensure it avoids disk IO attempts internally
      },
    });
    await rig.initialize();
    rig.render();
    await rig.waitForIdle();

    // Replay the scripted user turns through the Mock User driver.
    await rig.driveMockUser(extractUserPrompts(script), 10000);

    // This prompt exhausts the primary generator and triggers the fallback.
    await rig.sendMessage('What is 2 + 2?');

    // Wait for the fallback response to render.
    await rig.waitForOutput('The answer is 4.', 10000);
    const output = rig.getStaticOutput();
    expect(output).toContain('The answer is 4.');

    // Let everything settle so React act() warnings don't fire during unmount.
    await rig.drainBreakpointsUntilIdle(undefined, 10000);
  });
});

View File

@@ -8,6 +8,7 @@ import { describe, it, afterEach } from 'vitest';
import { AppRig } from '../test-utils/AppRig.js';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { FakeContentGenerator } from '@google/gemini-cli-core';
import { PolicyDecision } from '@google/gemini-cli-core';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -24,8 +25,9 @@ describe('Model Steering Integration', () => {
__dirname,
'../test-utils/fixtures/steering.responses',
);
const contentGenerator = await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({
fakeResponsesPath,
contentGenerator,
configOverrides: { modelSteering: true },
});
await rig.initialize();

View File

@@ -9,6 +9,7 @@ import { act } from 'react';
import { AppRig } from './AppRig.js';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { FakeContentGenerator } from '@google/gemini-cli-core';
import { debugLogger } from '@google/gemini-cli-core';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -26,8 +27,9 @@ describe('AppRig', () => {
'fixtures',
'steering.responses',
);
const contentGenerator = await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({
fakeResponsesPath,
contentGenerator,
configOverrides: { modelSteering: true },
});
await rig.initialize();
@@ -67,7 +69,8 @@ describe('AppRig', () => {
'fixtures',
'simple.responses',
);
rig = new AppRig({ fakeResponsesPath });
const contentGenerator = await FakeContentGenerator.fromFile(fakeResponsesPath);
rig = new AppRig({ contentGenerator });
await rig.initialize();
await act(async () => {
rig!.render();

View File

@@ -31,6 +31,7 @@ import {
debugLogger,
CoreToolCallStatus,
ConsecaSafetyChecker,
type ContentGenerator,
} from '@google/gemini-cli-core';
import {
type MockShellCommand,
@@ -54,32 +55,38 @@ import type { Content, GenerateContentParameters } from '@google/genai';
const sessionStateMap = new Map<string, StreamingState>();
const activeRigs = new Map<string, AppRig>();
// Mock StreamingContext to report state changes back to the observer
vi.mock('../ui/contexts/StreamingContext.js', async (importOriginal) => {
// Mock useGeminiStream to report state changes back to the observer
vi.mock('../ui/hooks/useGeminiStream.js', async (importOriginal) => {
const original =
await importOriginal<typeof import('../ui/contexts/StreamingContext.js')>();
const { useConfig } = await import('../ui/contexts/ConfigContext.js');
await importOriginal<typeof import('../ui/hooks/useGeminiStream.js')>();
const React = await import('react');
return {
...original,
useStreamingContext: () => {
const state = original.useStreamingContext();
const config = useConfig();
const sessionId = config.getSessionId();
// eslint-disable-next-line @typescript-eslint/no-explicit-any
useGeminiStream: (...args: any[]) => {
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion, @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment
const result = (original.useGeminiStream as any)(...args);
const config = args[3]; // config is the 4th argument
const sessionId = config?.getSessionId?.();
React.useEffect(() => {
sessionStateMap.set(sessionId, state);
// If we see activity, we are no longer "awaiting" the start of a response
if (state !== StreamingState.Idle) {
const rig = activeRigs.get(sessionId);
if (rig) {
rig.awaitingResponse = false;
if (sessionId) {
debugLogger.log(
`[AppRig React Hook] State updating to: ${result.streamingState}`,
);
sessionStateMap.set(sessionId, result.streamingState);
// If we see activity, we are no longer "awaiting" the start of a response
if (result.streamingState !== StreamingState.Idle) {
const rig = activeRigs.get(sessionId);
if (rig) {
rig.awaitingResponse = false;
}
}
}
}, [sessionId, state]);
}, [sessionId, result.streamingState]);
return state;
return result;
},
};
});
@@ -137,10 +144,10 @@ vi.mock('../ui/components/GeminiRespondingSpinner.js', async () => {
});
export interface AppRigOptions {
fakeResponsesPath?: string;
terminalWidth?: number;
terminalHeight?: number;
configOverrides?: Partial<ConfigParameters>;
contentGenerator?: ContentGenerator;
}
export interface PendingConfirmation {
@@ -160,11 +167,13 @@ export class AppRig {
private pendingConfirmations = new Map<string, PendingConfirmation>();
private breakpointTools = new Set<string | undefined>();
private lastAwaitedConfirmation: PendingConfirmation | undefined;
private lastIsBusyLog = 0;
/**
* True if a message was just sent but React hasn't yet reported a non-idle state.
*/
awaitingResponse = false;
activeStreamCount = 0;
constructor(private options: AppRigOptions = {}) {
const uniqueId = randomUUID();
@@ -194,7 +203,7 @@ export class AppRig {
cwd: this.testDir,
debugMode: false,
model: 'test-model',
fakeResponses: this.options.fakeResponsesPath,
contentGenerator: this.options.contentGenerator,
interactive: true,
approvalMode,
policyEngineConfig,
@@ -205,8 +214,38 @@ export class AppRig {
};
this.config = makeFakeConfig(configParams);
if (this.options.fakeResponsesPath) {
this.stubRefreshAuth();
// Track active streams directly from the client to prevent false idleness during synchronous mock yields
const client = this.config.getGeminiClient();
const originalStream = client.sendMessageStream.bind(client);
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-type-assertion
client.sendMessageStream = async function* (this: AppRig, ...args: any[]): AsyncGenerator<any, any, any> {
this.awaitingResponse = false;
this.activeStreamCount++;
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion, @typescript-eslint/no-explicit-any
yield* (originalStream as any)(...args);
} finally {
this.activeStreamCount = Math.max(0, this.activeStreamCount - 1);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
}.bind(this) as any;
if (this.config.fakeResponses || this.options.contentGenerator) {
if (!this.options.contentGenerator && !this.config.fakeResponses) {
this.stubRefreshAuth();
}
if (!process.env['GEMINI_API_KEY']) {
vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
}
MockShellExecutionService.setPassthrough(false);
} else {
if (!process.env['GEMINI_API_KEY']) {
throw new Error(
'GEMINI_API_KEY must be set in the environment for live model tests.',
);
}
// For live tests, we allow falling through to the real shell service if no mock matches
MockShellExecutionService.setPassthrough(true);
}
this.setupMessageBusListeners();
@@ -222,18 +261,6 @@ export class AppRig {
private setupEnvironment() {
// Stub environment variables to avoid interference from developer's machine
vi.stubEnv('GEMINI_CLI_HOME', this.testDir);
if (this.options.fakeResponsesPath) {
vi.stubEnv('GEMINI_API_KEY', 'test-api-key');
MockShellExecutionService.setPassthrough(false);
} else {
if (!process.env['GEMINI_API_KEY']) {
throw new Error(
'GEMINI_API_KEY must be set in the environment for live model tests.',
);
}
// For live tests, we allow falling through to the real shell service if no mock matches
MockShellExecutionService.setPassthrough(true);
}
vi.stubEnv('GEMINI_DEFAULT_AUTH_TYPE', AuthType.USE_GEMINI);
}
@@ -348,18 +375,28 @@ export class AppRig {
* Returns true if the agent is currently busy (responding or executing tools).
*/
isBusy(): boolean {
if (this.awaitingResponse) {
const reactState = sessionStateMap.get(this.sessionId);
if (reactState && reactState !== StreamingState.Idle) {
this.awaitingResponse = false;
}
if (!this.lastIsBusyLog || Date.now() - this.lastIsBusyLog > 1000) {
debugLogger.log(`[AppRig] isBusy check: awaitingResponse=${this.awaitingResponse}, activeStreams=${this.activeStreamCount}, reactState=${reactState}`);
this.lastIsBusyLog = Date.now();
}
if (this.awaitingResponse || this.activeStreamCount > 0) {
return true;
}
const reactState = sessionStateMap.get(this.sessionId);
// If we have a React-based state, use it as the definitive signal.
// 'responding' and 'waiting-for-confirmation' both count as busy for the overall task.
if (reactState !== undefined) {
return reactState !== StreamingState.Idle;
}
// Fallback to tool tracking if React hasn't reported yet
// Fallback to tool tracking
const isAnyToolActive = this.toolCalls.some((tc) => {
if (
tc.status === CoreToolCallStatus.Executing ||
@@ -535,6 +572,7 @@ export class AppRig {
| { type: 'confirmation'; confirmation: PendingConfirmation }
| { type: 'idle' }
> {
debugLogger.log(`[AppRig] waitForNextEvent started`);
let confirmation: PendingConfirmation | undefined;
let isIdle = false;
@@ -554,6 +592,7 @@ export class AppRig {
},
);
debugLogger.log(`[AppRig] waitForNextEvent finished: confirmation=${!!confirmation}, isIdle=${isIdle}`);
if (confirmation) {
this.lastAwaitedConfirmation = confirmation;
return { type: 'confirmation', confirmation };
@@ -630,8 +669,11 @@ export class AppRig {
onConfirmation?: (confirmation: PendingConfirmation) => void | boolean,
timeout = 60000,
) {
debugLogger.log(`[AppRig] drainBreakpointsUntilIdle started`);
while (true) {
debugLogger.log(`[AppRig] drainBreakpointsUntilIdle: waiting for next event`);
const event = await this.waitForNextEvent(timeout);
debugLogger.log(`[AppRig] drainBreakpointsUntilIdle: got event type ${event.type}`);
if (event.type === 'idle') {
break;
}
@@ -640,9 +682,30 @@ export class AppRig {
const handled = onConfirmation?.(confirmation);
if (!handled) {
debugLogger.log(`[AppRig] drainBreakpointsUntilIdle: resolving tool ${confirmation.toolName}`);
await this.resolveTool(confirmation);
}
}
debugLogger.log(`[AppRig] drainBreakpointsUntilIdle finished`);
}
/**
 * Plays the role of an automated user ('Mock User'): sends each scripted
 * prompt in order and waits for the agent to fully settle before the next,
 * leaving the conversation history primed for a live trial or eval.
 *
 * @param prompts User messages to send, in sequence.
 * @param timeout Per-interaction timeout in milliseconds.
 */
async driveMockUser(prompts: string[], timeout = 60000) {
  debugLogger.log(`[AppRig] driveMockUser started with ${prompts.length} prompts`);
  for (const [i, prompt] of prompts.entries()) {
    debugLogger.log(`[AppRig] driveMockUser: sending prompt ${i + 1}: ${prompt}`);
    await this.sendMessage(prompt);
    debugLogger.log(`[AppRig] driveMockUser: draining breakpoints after prompt ${i + 1}`);
    await this.drainBreakpointsUntilIdle(undefined, timeout);
  }
  debugLogger.log(`[AppRig] driveMockUser finished`);
}
getConfig(): Config {

View File

@@ -776,9 +776,47 @@ export const renderWithProviders = (
<VimModeProvider>
<ShellFocusContext.Provider value={shellFocus}>
<SessionStatsProvider>
<StreamingContext.Provider
value={finalUiState.streamingState}
>
{providedUiState?.streamingState !== undefined ? (
<StreamingContext.Provider
value={finalUiState.streamingState}
>
<UIActionsContext.Provider value={finalUIActions}>
<OverflowProvider>
<ToolActionsProvider
config={finalConfig}
toolCalls={allToolCalls}
>
<AskUserActionsProvider
request={null}
onSubmit={vi.fn()}
onCancel={vi.fn()}
>
<KeypressProvider>
<MouseProvider
mouseEventsEnabled={mouseEventsEnabled}
>
<TerminalProvider>
<ScrollProvider>
<ContextCapture>
<Box
width={terminalWidth}
flexShrink={0}
flexGrow={0}
flexDirection="column"
>
{component}
</Box>
</ContextCapture>
</ScrollProvider>
</TerminalProvider>
</MouseProvider>
</KeypressProvider>
</AskUserActionsProvider>
</ToolActionsProvider>
</OverflowProvider>
</UIActionsContext.Provider>
</StreamingContext.Provider>
) : (
<UIActionsContext.Provider value={finalUIActions}>
<OverflowProvider>
<ToolActionsProvider
@@ -814,7 +852,7 @@ export const renderWithProviders = (
</ToolActionsProvider>
</OverflowProvider>
</UIActionsContext.Provider>
</StreamingContext.Provider>
)}
</SessionStatsProvider>
</ShellFocusContext.Provider>
</VimModeProvider>

View File

@@ -495,6 +495,7 @@ export interface ConfigParameters {
mcpEnablementCallbacks?: McpEnablementCallbacks;
userMemory?: string | HierarchicalMemory;
geminiMdFileCount?: number;
contentGenerator?: ContentGenerator;
geminiMdFilePaths?: string[];
approvalMode?: ApprovalMode;
showMemoryUsage?: boolean;
@@ -566,7 +567,7 @@ export interface ConfigParameters {
maxAttempts?: number;
enableShellOutputEfficiency?: boolean;
shellToolInactivityTimeout?: number;
fakeResponses?: string;
fakeResponses?: string | any[];
recordResponses?: string;
ptyInfo?: string;
disableYoloMode?: boolean;
@@ -625,6 +626,7 @@ export class Config implements McpContext, AgentLoopContext {
private trackerService?: TrackerService;
private contentGeneratorConfig!: ContentGeneratorConfig;
private contentGenerator!: ContentGenerator;
private _initialContentGenerator?: ContentGenerator;
readonly modelConfigService: ModelConfigService;
private readonly embeddingModel: string;
private readonly sandbox: SandboxConfig | undefined;
@@ -764,7 +766,7 @@ export class Config implements McpContext, AgentLoopContext {
private readonly maxAttempts: number;
private readonly enableShellOutputEfficiency: boolean;
private readonly shellToolInactivityTimeout: number;
readonly fakeResponses?: string;
readonly fakeResponses?: string | any[];
readonly recordResponses?: string;
private readonly disableYoloMode: boolean;
private readonly rawOutput: boolean;
@@ -829,6 +831,7 @@ export class Config implements McpContext, AgentLoopContext {
this.pendingIncludeDirectories = params.includeDirectories ?? [];
this.debugMode = params.debugMode;
this.question = params.question;
this._initialContentGenerator = params.contentGenerator;
this.coreTools = params.coreTools;
this.allowedTools = params.allowedTools;
@@ -1253,11 +1256,17 @@ export class Config implements McpContext, AgentLoopContext {
baseUrl,
customHeaders,
);
this.contentGenerator = await createContentGenerator(
newContentGeneratorConfig,
this,
this.getSessionId(),
);
if (this._initialContentGenerator) {
this.contentGenerator = this._initialContentGenerator;
// We only use it once, on first initialization. Future refreshes will create real ones
// unless we want it to persist forever, but usually AppRig manages this.
} else {
this.contentGenerator = await createContentGenerator(
newContentGeneratorConfig,
this,
this.getSessionId(),
);
}
// Only assign to instance properties after successful initialization
this.contentGeneratorConfig = newContentGeneratorConfig;

View File

@@ -21,9 +21,10 @@ import type { UserTierId, GeminiUserTier } from '../code_assist/types.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import { InstallationManager } from '../utils/installationManager.js';
import { FakeContentGenerator } from './fakeContentGenerator.js';
import { FallbackContentGenerator } from './fallbackContentGenerator.js';
import { parseCustomHeaders } from '../utils/customHeaderUtils.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';
import { getVersion, resolveModel } from '../../index.js';
import { debugLogger, getVersion, resolveModel } from '../../index.js';
import type { LlmRole } from '../telemetry/llmRole.js';
/**
@@ -160,12 +161,6 @@ export async function createContentGenerator(
sessionId?: string,
): Promise<ContentGenerator> {
const generator = await (async () => {
if (gcConfig.fakeResponses) {
const fakeGenerator = await FakeContentGenerator.fromFile(
gcConfig.fakeResponses,
);
return new LoggingContentGenerator(fakeGenerator, gcConfig);
}
const version = await getVersion();
const model = resolveModel(
gcConfig.getModel(),
@@ -194,23 +189,21 @@ export async function createContentGenerator(
) {
baseHeaders['Authorization'] = `Bearer ${config.apiKey}`;
}
let realGenerator: ContentGenerator;
if (
config.authType === AuthType.LOGIN_WITH_GOOGLE ||
config.authType === AuthType.COMPUTE_ADC
) {
const httpOptions = { headers: baseHeaders };
return new LoggingContentGenerator(
await createCodeAssistContentGenerator(
httpOptions,
config.authType,
gcConfig,
sessionId,
),
realGenerator = await createCodeAssistContentGenerator(
httpOptions,
config.authType,
gcConfig,
sessionId,
);
}
if (
} else if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI ||
config.authType === AuthType.GATEWAY
@@ -242,11 +235,28 @@ export async function createContentGenerator(
httpOptions,
...(apiVersionEnv && { apiVersion: apiVersionEnv }),
});
return new LoggingContentGenerator(googleGenAI.models, gcConfig);
realGenerator = googleGenAI.models;
} else {
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
}
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
let targetGenerator = realGenerator;
if (gcConfig.fakeResponses) {
if (Array.isArray(gcConfig.fakeResponses)) {
debugLogger.log(`[createContentGenerator] Instantiating FakeContentGenerator with ${gcConfig.fakeResponses.length} in-memory mock responses.`);
const fakeGen = new FakeContentGenerator(gcConfig.fakeResponses);
targetGenerator = new FallbackContentGenerator(fakeGen, realGenerator);
} else {
debugLogger.log(`[createContentGenerator] Instantiating FakeContentGenerator from file: ${gcConfig.fakeResponses}`);
const fakeGen = await FakeContentGenerator.fromFile(gcConfig.fakeResponses);
targetGenerator = new FallbackContentGenerator(fakeGen, realGenerator);
}
}
return new LoggingContentGenerator(targetGenerator, gcConfig);
})();
if (gcConfig.recordResponses) {

View File

@@ -18,6 +18,16 @@ import type { UserTierId, GeminiUserTier } from '../code_assist/types.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
import type { LlmRole } from '../telemetry/types.js';
/**
 * Thrown when a call arrives after every scripted mock response has been
 * consumed. Callers (e.g. a fallback generator) can catch this specific
 * error type to hand the request off to a real generator instead.
 */
export class MockExhaustedError extends Error {
  constructor(method: string, request?: unknown) {
    super(`No more mock responses for ${method}, got request:\n${safeJsonStringify(request)}`);
    this.name = 'MockExhaustedError';
  }
}
export type FakeResponse =
| {
method: 'generateContent';
@@ -53,7 +63,9 @@ export class FakeContentGenerator implements ContentGenerator {
return this.sentRequests;
}
static async fromFile(filePath: string): Promise<FakeContentGenerator> {
static async fromFile(
filePath: string,
): Promise<FakeContentGenerator> {
const fileContent = await promises.readFile(filePath, 'utf-8');
const responses = fileContent
.split('\n')
@@ -67,13 +79,14 @@ export class FakeContentGenerator implements ContentGenerator {
M extends FakeResponse['method'],
R = Extract<FakeResponse, { method: M }>['response'],
>(method: M, request: unknown): R {
const response = this.responses[this.callCounter++];
const response = this.responses[this.callCounter];
if (!response) {
throw new Error(
`No more mock responses for ${method}, got request:\n` +
safeJsonStringify(request),
);
throw new MockExhaustedError(method, request);
}
// We only increment the counter if we actually consume a mock response
this.callCounter++;
if (response.method !== method) {
throw new Error(
`Unexpected response type, next response was for ${response.method} but expected ${method}`,
@@ -85,28 +98,29 @@ export class FakeContentGenerator implements ContentGenerator {
async generateContent(
request: GenerateContentParameters,
_userPromptId: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
userPromptId: string,
role: LlmRole,
): Promise<GenerateContentResponse> {
this.sentRequests.push(request);
const next = this.getNextResponse('generateContent', request);
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return Object.setPrototypeOf(
this.getNextResponse('generateContent', request),
GenerateContentResponse.prototype,
);
return Object.setPrototypeOf(next, GenerateContentResponse.prototype);
}
async generateContentStream(
request: GenerateContentParameters,
_userPromptId: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
userPromptId: string,
role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
this.sentRequests.push(request);
const responses = this.getNextResponse('generateContentStream', request);
async function* stream() {
for (const response of responses) {
// Add a tiny delay to ensure React has time to render the 'Responding'
// state. If the mock stream finishes synchronously, AppRig's
// awaitingResponse flag may never be cleared, causing the rig to hang.
await new Promise((resolve) => setTimeout(resolve, 5));
for (const response of responses!) {
yield Object.setPrototypeOf(
response,
GenerateContentResponse.prototype,
@@ -119,16 +133,15 @@ export class FakeContentGenerator implements ContentGenerator {
async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
return this.getNextResponse('countTokens', request);
const next = this.getNextResponse('countTokens', request);
return next;
}
async embedContent(
request: EmbedContentParameters,
): Promise<EmbedContentResponse> {
const next = this.getNextResponse('embedContent', request);
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return Object.setPrototypeOf(
this.getNextResponse('embedContent', request),
EmbedContentResponse.prototype,
);
return Object.setPrototypeOf(next, EmbedContentResponse.prototype);
}
}

View File

@@ -0,0 +1,97 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { ContentGenerator } from './contentGenerator.js';
import type { GenerateContentParameters, GenerateContentResponse, CountTokensParameters, CountTokensResponse, EmbedContentParameters, EmbedContentResponse } from '@google/genai';
import type { LlmRole } from '../telemetry/types.js';
import { debugLogger } from '../utils/debugLogger.js';
import { MockExhaustedError } from './fakeContentGenerator.js';
/**
 * A ContentGenerator that forwards every call to a primary generator and,
 * when the primary throws MockExhaustedError (its scripted responses ran
 * out), transparently replays the same call on a fallback generator. Any
 * other error propagates unchanged.
 *
 * NOTE(review): for generateContentStream the try/catch only covers creation
 * of the stream (the awaited promise); errors thrown while iterating the
 * returned generator are not intercepted here.
 */
export class FallbackContentGenerator implements ContentGenerator {
  constructor(
    private readonly primary: ContentGenerator,
    private readonly fallback: ContentGenerator,
    // Invoked with the method name each time a call falls through.
    private readonly onFallback?: (method: string) => void,
  ) {}

  // Tier metadata is always sourced from the primary generator.
  get userTier() {
    return this.primary.userTier;
  }

  get userTierName() {
    return this.primary.userTierName;
  }

  get paidTier() {
    return this.primary.paidTier;
  }

  async generateContent(
    request: GenerateContentParameters,
    userPromptId: string,
    role: LlmRole,
  ): Promise<GenerateContentResponse> {
    try {
      return await this.primary.generateContent(request, userPromptId, role);
    } catch (err) {
      if (!(err instanceof MockExhaustedError)) {
        throw err;
      }
      debugLogger.log(`[FallbackContentGenerator] Exhausted primary generator for generateContent. Falling back.`);
      this.onFallback?.('generateContent');
      return this.fallback.generateContent(request, userPromptId, role);
    }
  }

  async generateContentStream(
    request: GenerateContentParameters,
    userPromptId: string,
    role: LlmRole,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    try {
      return await this.primary.generateContentStream(request, userPromptId, role);
    } catch (err) {
      if (!(err instanceof MockExhaustedError)) {
        throw err;
      }
      debugLogger.log(`[FallbackContentGenerator] Exhausted primary generator for generateContentStream. Falling back.`);
      this.onFallback?.('generateContentStream');
      return this.fallback.generateContentStream(request, userPromptId, role);
    }
  }

  async countTokens(
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    try {
      // A primary without countTokens is treated the same as an exhausted one.
      if (!this.primary.countTokens) {
        throw new MockExhaustedError('countTokens');
      }
      return await this.primary.countTokens(request);
    } catch (err) {
      if (err instanceof MockExhaustedError && this.fallback.countTokens) {
        debugLogger.log(`[FallbackContentGenerator] Exhausted primary generator for countTokens. Falling back.`);
        this.onFallback?.('countTokens');
        return this.fallback.countTokens(request);
      }
      throw err;
    }
  }

  async embedContent(
    request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    try {
      // A primary without embedContent is treated the same as an exhausted one.
      if (!this.primary.embedContent) {
        throw new MockExhaustedError('embedContent');
      }
      return await this.primary.embedContent(request);
    } catch (err) {
      if (err instanceof MockExhaustedError && this.fallback.embedContent) {
        debugLogger.log(`[FallbackContentGenerator] Exhausted primary generator for embedContent. Falling back.`);
        this.onFallback?.('embedContent');
        return this.fallback.embedContent(request);
      }
      throw err;
    }
  }
}

View File

@@ -0,0 +1,57 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { GenerateContentResponse } from '@google/genai';
import type { FakeResponse } from './fakeContentGenerator.js';
/** A scripted user turn: text the Mock User driver will send verbatim. */
export type FakeRequest = { method: 'userText'; text: string };
/** One entry in a priming script: either a canned model response or a user turn. */
export type ScriptItem = FakeResponse | FakeRequest;
/**
 * Builds a scripted streaming reply consisting of a single chunk whose only
 * part is the given text, finishing with STOP.
 */
export function mockGenerateContentStreamText(text: string): FakeResponse {
  const chunk = {
    candidates: [{ content: { parts: [{ text }] }, finishReason: 'STOP' }],
  };
  // A stream response is an array of chunks; here just one.
  const response = [chunk] as GenerateContentResponse[];
  return { method: 'generateContentStream', response };
}
/**
 * Builds a scripted non-streaming reply whose single candidate carries the
 * given text and finishes with STOP.
 */
export function mockGenerateContentText(text: string): FakeResponse {
  const response = {
    candidates: [{ content: { parts: [{ text }] }, finishReason: 'STOP' }],
  } as GenerateContentResponse;
  return { method: 'generateContent', response };
}
/** Wraps plain text as a scripted user turn for a priming script. */
export function userText(text: string): FakeRequest {
  const turn: FakeRequest = { method: 'userText', text };
  return turn;
}
/** Type guard: true for every script item that is NOT a user turn. */
export function isFakeResponse(item: ScriptItem): item is FakeResponse {
  const isUserTurn = item.method === 'userText';
  return !isUserTurn;
}
/** Type guard: true only for scripted user turns. */
export function isFakeRequest(item: ScriptItem): item is FakeRequest {
  switch (item.method) {
    case 'userText':
      return true;
    default:
      return false;
  }
}
/**
 * Collects the text of every user turn in a script, in order, for the
 * Mock User driver.
 */
export function extractUserPrompts(script: ScriptItem[]): string[] {
  const prompts: string[] = [];
  for (const item of script) {
    // Only 'userText' entries are user turns; the literal tag narrows the type.
    if (item.method === 'userText') {
      prompts.push(item.text);
    }
  }
  return prompts;
}
/**
 * Collects every canned model response in a script, in order, dropping the
 * interleaved user turns.
 */
export function extractFakeResponses(script: ScriptItem[]): FakeResponse[] {
  const responses: FakeResponse[] = [];
  for (const item of script) {
    // Anything that is not tagged 'userText' is a FakeResponse variant.
    if (item.method !== 'userText') {
      responses.push(item);
    }
  }
  return responses;
}

View File

@@ -35,6 +35,9 @@ export * from './commands/types.js';
export * from './core/baseLlmClient.js';
export * from './core/client.js';
export * from './core/contentGenerator.js';
export * from './core/fakeContentGenerator.js';
export * from './core/fallbackContentGenerator.js';
export * from './core/scriptUtils.js';
export * from './core/loggingContentGenerator.js';
export * from './core/geminiChat.js';
export * from './core/logger.js';