mirror of https://github.com/google-gemini/gemini-cli.git (synced 2026-03-11 06:31:01 -07:00)
refactor(core): Migrate next speaker check to use BaseLlmClient (#8424)
@@ -295,6 +295,12 @@ describe('Gemini Client (client.ts)', () => {
         getProjectTempDir: vi.fn().mockReturnValue('/test/temp'),
       },
       getContentGenerator: vi.fn().mockReturnValue(mockContentGenerator),
+      getBaseLlmClient: vi.fn().mockReturnValue({
+        generateJson: vi.fn().mockResolvedValue({
+          next_speaker: 'user',
+          reasoning: 'test',
+        }),
+      }),
     } as unknown as Config;

     client = new GeminiClient(mockConfig);
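Note: the mock above hands GeminiClient a stub BaseLlmClient whose generateJson always resolves to a canned answer, so the existing client tests can run the next-speaker check without any network call. A minimal sketch of how the stub is consumed (illustrative only; the real call site is the client.ts hunk below):

    // Illustrative only: resolve the stub from Config and ask who speaks next.
    const llm = mockConfig.getBaseLlmClient();
    const verdict = await llm.generateJson({}); // the vi.fn() stub ignores its arguments
    // verdict => { next_speaker: 'user', reasoning: 'test' }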
@@ -541,8 +541,9 @@ export class GeminiClient {

       const nextSpeakerCheck = await checkNextSpeaker(
         this.getChat(),
-        this,
+        this.config.getBaseLlmClient(),
         signal,
+        prompt_id,
       );
       logNextSpeakerCheck(
         this.config,
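For orientation, a hedged sketch of how the result is typically consumed; the branch below is an assumption based on the next_speaker values the tests exercise, not code from this diff:

    const nextSpeakerCheck = await checkNextSpeaker(
      this.getChat(),
      this.config.getBaseLlmClient(), // BaseLlmClient now comes from Config
      signal,
      prompt_id,
    );
    if (nextSpeakerCheck?.next_speaker === 'model') {
      // assumed: the turn loop lets the model keep speaking
    }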
@@ -8,7 +8,8 @@ import type { Mock } from 'vitest';
 import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
 import type { Content } from '@google/genai';
 import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
-import { GeminiClient } from '../core/client.js';
+import { BaseLlmClient } from '../core/baseLlmClient.js';
+import type { ContentGenerator } from '../core/contentGenerator.js';
 import type { Config } from '../config/config.js';
 import type { NextSpeakerResponse } from './nextSpeakerChecker.js';
 import { checkNextSpeaker } from './nextSpeakerChecker.js';
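The NextSpeakerResponse type imported above is not shown in this diff; judging from the fixtures the tests pass around, its shape is presumably along these lines (a sketch, not the exported definition):

    // Inferred from the test fixtures in this file.
    interface NextSpeakerResponse {
      reasoning: string;
      next_speaker: 'user' | 'model';
    }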
@@ -41,14 +42,15 @@ vi.mock('node:fs', () => {
 });

 // Mock GeminiClient and Config constructor
-vi.mock('../core/client.js');
+vi.mock('../core/baseLlmClient.js');
 vi.mock('../config/config.js');

 describe('checkNextSpeaker', () => {
   let chatInstance: GeminiChat;
   let mockConfig: Config;
-  let mockGeminiClient: GeminiClient;
+  let mockBaseLlmClient: BaseLlmClient;
   const abortSignal = new AbortController().signal;
+  const promptId = 'test-prompt-id';

   beforeEach(() => {
     vi.resetAllMocks();
@@ -61,7 +63,15 @@ describe('checkNextSpeaker', () => {
       },
     } as unknown as Config;

-    mockGeminiClient = new GeminiClient(mockConfig);
+    mockBaseLlmClient = new BaseLlmClient(
+      {
+        generateContent: vi.fn(),
+        generateContentStream: vi.fn(),
+        countTokens: vi.fn(),
+        embedContent: vi.fn(),
+      } as ContentGenerator,
+      mockConfig,
+    );

     // GeminiChat will receive the mocked instances via the mocked GoogleGenAI constructor
     chatInstance = new GeminiChat(
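Because '../core/baseLlmClient.js' is auto-mocked (vi.mock with no factory), the BaseLlmClient built here gets vi.fn() stubs for its methods and never actually exercises the stub ContentGenerator passed to it. Each test then programs the stub directly, for example:

    // Per-test arrangement used throughout this file (taken from the hunks below).
    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
      reasoning: 'Model asked a question.',
      next_speaker: 'user',
    });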
@@ -82,11 +92,12 @@ describe('checkNextSpeaker', () => {
     (chatInstance.getHistory as Mock).mockReturnValue([]);
     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toBeNull();
-    expect(mockGeminiClient.generateJson).not.toHaveBeenCalled();
+    expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled();
   });

   it('should return null if the last speaker was the user', async () => {
@@ -95,11 +106,12 @@ describe('checkNextSpeaker', () => {
     ]);
     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toBeNull();
-    expect(mockGeminiClient.generateJson).not.toHaveBeenCalled();
+    expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled();
   });

   it("should return { next_speaker: 'model' } when model intends to continue", async () => {
@@ -110,15 +122,16 @@ describe('checkNextSpeaker', () => {
       reasoning: 'Model stated it will do something.',
       next_speaker: 'model',
     };
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toEqual(mockApiResponse);
-    expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1);
+    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1);
   });

   it("should return { next_speaker: 'user' } when model asks a question", async () => {
@@ -129,12 +142,13 @@ describe('checkNextSpeaker', () => {
       reasoning: 'Model asked a question.',
       next_speaker: 'user',
     };
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toEqual(mockApiResponse);
   });
@@ -147,87 +161,92 @@ describe('checkNextSpeaker', () => {
       reasoning: 'Model made a statement, awaiting user input.',
       next_speaker: 'user',
     };
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toEqual(mockApiResponse);
   });

-  it('should return null if geminiClient.generateJson throws an error', async () => {
+  it('should return null if baseLlmClient.generateJson throws an error', async () => {
     const consoleWarnSpy = vi
       .spyOn(console, 'warn')
       .mockImplementation(() => {});
     (chatInstance.getHistory as Mock).mockReturnValue([
       { role: 'model', parts: [{ text: 'Some model output.' }] },
     ] as Content[]);
-    (mockGeminiClient.generateJson as Mock).mockRejectedValue(
+    (mockBaseLlmClient.generateJson as Mock).mockRejectedValue(
       new Error('API Error'),
     );

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toBeNull();
     consoleWarnSpy.mockRestore();
   });

-  it('should return null if geminiClient.generateJson returns invalid JSON (missing next_speaker)', async () => {
+  it('should return null if baseLlmClient.generateJson returns invalid JSON (missing next_speaker)', async () => {
     (chatInstance.getHistory as Mock).mockReturnValue([
       { role: 'model', parts: [{ text: 'Some model output.' }] },
     ] as Content[]);
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue({
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
       reasoning: 'This is incomplete.',
     } as unknown as NextSpeakerResponse); // Type assertion to simulate invalid response

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toBeNull();
   });

-  it('should return null if geminiClient.generateJson returns a non-string next_speaker', async () => {
+  it('should return null if baseLlmClient.generateJson returns a non-string next_speaker', async () => {
     (chatInstance.getHistory as Mock).mockReturnValue([
       { role: 'model', parts: [{ text: 'Some model output.' }] },
     ] as Content[]);
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue({
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
       reasoning: 'Model made a statement, awaiting user input.',
       next_speaker: 123, // Invalid type
     } as unknown as NextSpeakerResponse);

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toBeNull();
   });

-  it('should return null if geminiClient.generateJson returns an invalid next_speaker string value', async () => {
+  it('should return null if baseLlmClient.generateJson returns an invalid next_speaker string value', async () => {
     (chatInstance.getHistory as Mock).mockReturnValue([
       { role: 'model', parts: [{ text: 'Some model output.' }] },
     ] as Content[]);
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue({
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue({
       reasoning: 'Model made a statement, awaiting user input.',
       next_speaker: 'neither', // Invalid enum value
     } as unknown as NextSpeakerResponse);

     const result = await checkNextSpeaker(
       chatInstance,
-      mockGeminiClient,
+      mockBaseLlmClient,
       abortSignal,
+      promptId,
     );
     expect(result).toBeNull();
   });

-  it('should call generateJson with DEFAULT_GEMINI_FLASH_MODEL', async () => {
+  it('should call generateJson with the correct parameters', async () => {
     (chatInstance.getHistory as Mock).mockReturnValue([
       { role: 'model', parts: [{ text: 'Some model output.' }] },
     ] as Content[]);
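The null results asserted above imply that checkNextSpeaker validates the parsed JSON before returning it. The actual guard sits outside this diff; a sketch of a check that would satisfy these tests:

    // Assumed validation, reconstructed from the test expectations only.
    function isValidNextSpeakerResponse(
      value: unknown,
    ): value is NextSpeakerResponse {
      if (typeof value !== 'object' || value === null) return false;
      const speaker = (value as { next_speaker?: unknown }).next_speaker;
      return speaker === 'user' || speaker === 'model';
    }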
@@ -235,13 +254,19 @@ describe('checkNextSpeaker', () => {
       reasoning: 'Model made a statement, awaiting user input.',
       next_speaker: 'user',
     };
-    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);
+    (mockBaseLlmClient.generateJson as Mock).mockResolvedValue(mockApiResponse);

-    await checkNextSpeaker(chatInstance, mockGeminiClient, abortSignal);
+    await checkNextSpeaker(
+      chatInstance,
+      mockBaseLlmClient,
+      abortSignal,
+      promptId,
+    );

-    expect(mockGeminiClient.generateJson).toHaveBeenCalled();
-    const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
+    expect(mockBaseLlmClient.generateJson).toHaveBeenCalled();
+    const generateJsonCall = (mockBaseLlmClient.generateJson as Mock).mock
       .calls[0];
-    expect(generateJsonCall[3]).toBe(DEFAULT_GEMINI_FLASH_MODEL);
+    expect(generateJsonCall[0].model).toBe(DEFAULT_GEMINI_FLASH_MODEL);
+    expect(generateJsonCall[0].promptId).toBe(promptId);
   });
 });
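The assertions now read fields off calls[0][0], i.e. generateJson takes a single options object instead of positional arguments. Its assumed shape, going only by the properties used in this diff (not the published BaseLlmClient interface):

    // Content comes from '@google/genai'; the schema type is simplified here.
    interface GenerateJsonOptions {
      contents: Content[];
      schema: Record<string, unknown>; // RESPONSE_SCHEMA in this call
      model: string; // DEFAULT_GEMINI_FLASH_MODEL here
      abortSignal: AbortSignal;
      promptId: string;
    }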
@@ -6,7 +6,7 @@

 import type { Content } from '@google/genai';
 import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
-import type { GeminiClient } from '../core/client.js';
+import type { BaseLlmClient } from '../core/baseLlmClient.js';
 import type { GeminiChat } from '../core/geminiChat.js';
 import { isFunctionResponse } from './messageInspectors.js';

@@ -41,8 +41,9 @@ export interface NextSpeakerResponse {

 export async function checkNextSpeaker(
   chat: GeminiChat,
-  geminiClient: GeminiClient,
+  baseLlmClient: BaseLlmClient,
   abortSignal: AbortSignal,
+  promptId: string,
 ): Promise<NextSpeakerResponse | null> {
   // We need to capture the curated history because there are many moments when the model will return invalid turns
   // that when passed back up to the endpoint will break subsequent calls. An example of this is when the model decides
@@ -108,12 +109,13 @@ export async function checkNextSpeaker(
   ];

   try {
-    const parsedResponse = (await geminiClient.generateJson(
+    const parsedResponse = (await baseLlmClient.generateJson({
       contents,
-      RESPONSE_SCHEMA,
+      schema: RESPONSE_SCHEMA,
+      model: DEFAULT_GEMINI_FLASH_MODEL,
       abortSignal,
-      DEFAULT_GEMINI_FLASH_MODEL,
-    )) as unknown as NextSpeakerResponse;
+      promptId,
+    })) as unknown as NextSpeakerResponse;

     if (
       parsedResponse &&
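In summary, the positional call on GeminiClient becomes a named options object on BaseLlmClient; the mapping, taken directly from the hunk above:

    // old positional argument        -> new options field
    //   contents                     -> contents
    //   RESPONSE_SCHEMA              -> schema
    //   abortSignal                  -> abortSignal
    //   DEFAULT_GEMINI_FLASH_MODEL   -> model
    //   (not passed before)          -> promptId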