refactor(core): decouple GeminiChat from uiTelemetryService via Usage events (#12196)

Author: Sandy Tao
Committed by: GitHub
Date: 2025-10-30 14:05:38 -07:00
Parent: 68afb7200e
Commit: 322feaafa6

6 changed files with 88 additions and 31 deletions
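
The title describes the direction visible in the test diff below: the compression logic no longer reads the last prompt token count from the uiTelemetryService singleton; GeminiChat tracks it itself and publishes usage to telemetry via events. A minimal sketch of that pattern, in which every name except getLastPromptTokenCount is an assumption rather than the repository's actual implementation:

```ts
import { EventEmitter } from 'node:events';

interface UsageEvent {
  promptTokenCount: number;
}

// Hypothetical stand-in for GeminiChat after the refactor.
class GeminiChatSketch extends EventEmitter {
  private lastPromptTokenCount = 0;

  // Called with the usage metadata reported on each model response.
  recordUsage(usage: UsageEvent): void {
    this.lastPromptTokenCount = usage.promptTokenCount;
    this.emit('usage', usage); // telemetry subscribes; no direct dependency
  }

  // The accessor the compression service (and these tests) now rely on.
  getLastPromptTokenCount(): number {
    return this.lastPromptTokenCount;
  }
}

// Wiring: telemetry listens for Usage events instead of being imported
// by the chat itself.
const chat = new GeminiChatSketch();
chat.on('usage', (e: UsageEvent) => {
  // e.g. forward e.promptTokenCount to the UI telemetry layer
});
```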

chatCompressionService.test.ts

@@ -11,14 +11,12 @@ import {
 } from './chatCompressionService.js';
 import type { Content, GenerateContentResponse } from '@google/genai';
 import { CompressionStatus } from '../core/turn.js';
-import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
 import { tokenLimit } from '../core/tokenLimits.js';
 import type { GeminiChat } from '../core/geminiChat.js';
 import type { Config } from '../config/config.js';
 import { getInitialChatHistory } from '../utils/environmentContext.js';
 import type { ContentGenerator } from '../core/contentGenerator.js';
 
-vi.mock('../telemetry/uiTelemetry.js');
 vi.mock('../core/tokenLimits.js');
 vi.mock('../telemetry/loggers.js');
 vi.mock('../utils/environmentContext.js');
@@ -114,6 +112,7 @@ describe('ChatCompressionService', () => {
     service = new ChatCompressionService();
     mockChat = {
       getHistory: vi.fn(),
+      getLastPromptTokenCount: vi.fn().mockReturnValue(500),
     } as unknown as GeminiChat;
     mockConfig = {
       getChatCompression: vi.fn(),
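
The two hunks above are the mechanical shape of the decoupling in the tests: the module-level mock of uiTelemetry is deleted, and the stub moves onto the chat object itself. Side by side, using the imports already present at the top of this test file:

```ts
// Before (the removed lines): the test stubbed a module-level singleton.
//   vi.mock('../telemetry/uiTelemetry.js');
//   vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(500);

// After (the added lines): the count is an ordinary method on the mock chat,
// so no module mock is needed and the test loses a hidden dependency.
const mockChat = {
  getHistory: vi.fn(),
  getLastPromptTokenCount: vi.fn().mockReturnValue(500),
} as unknown as GeminiChat;
```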
@@ -121,7 +120,6 @@ describe('ChatCompressionService', () => {
     } as unknown as Config;
 
     vi.mocked(tokenLimit).mockReturnValue(1000);
-    vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(500);
     vi.mocked(getInitialChatHistory).mockImplementation(
       async (_config, extraHistory) => extraHistory || [],
     );
@@ -165,7 +163,7 @@ describe('ChatCompressionService', () => {
       vi.mocked(mockChat.getHistory).mockReturnValue([
         { role: 'user', parts: [{ text: 'hi' }] },
       ]);
-      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(600);
+      vi.mocked(mockChat.getLastPromptTokenCount).mockReturnValue(600);
       vi.mocked(tokenLimit).mockReturnValue(1000);
 
       // Threshold is 0.7 * 1000 = 700. 600 < 700, so NOOP.
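
The inline comment carries the arithmetic: with tokenLimit mocked to 1000 and a 0.7 compression factor, compression triggers at 700 tokens, so a last prompt count of 600 is a no-op. A hedged sketch of the check this test exercises; the constant name and function shape are assumptions, and the real logic lives in chatCompressionService.ts:

```ts
const COMPRESSION_TOKEN_THRESHOLD = 0.7; // assumed name for the 0.7 factor

function needsCompression(chat: GeminiChat, model: string): boolean {
  const limit = tokenLimit(model); // mocked to 1000 in these tests
  const used = chat.getLastPromptTokenCount(); // mocked to 600 here
  return used >= COMPRESSION_TOKEN_THRESHOLD * limit; // 600 < 700, so NOOP
}
```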
@@ -189,7 +187,7 @@ describe('ChatCompressionService', () => {
         { role: 'model', parts: [{ text: 'msg4' }] },
       ];
       vi.mocked(mockChat.getHistory).mockReturnValue(history);
-      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(800);
+      vi.mocked(mockChat.getLastPromptTokenCount).mockReturnValue(800);
       vi.mocked(tokenLimit).mockReturnValue(1000);
       const mockGenerateContent = vi.fn().mockResolvedValue({
         candidates: [
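
The 800-token case crosses the 700-token threshold, so this test must also mock the summarization call itself. A plausible completion of the truncated mock, following the GenerateContentResponse shape from @google/genai; the summary text is invented for illustration:

```ts
const mockGenerateContent = vi.fn().mockResolvedValue({
  candidates: [
    {
      content: { role: 'model', parts: [{ text: 'compressed summary' }] },
    },
  ],
} as unknown as GenerateContentResponse);
```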
@@ -227,7 +225,7 @@ describe('ChatCompressionService', () => {
         { role: 'model', parts: [{ text: 'msg4' }] },
       ];
       vi.mocked(mockChat.getHistory).mockReturnValue(history);
-      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(100);
+      vi.mocked(mockChat.getLastPromptTokenCount).mockReturnValue(100);
       vi.mocked(tokenLimit).mockReturnValue(1000);
 
       const mockGenerateContent = vi.fn().mockResolvedValue({
@@ -262,7 +260,7 @@ describe('ChatCompressionService', () => {
         { role: 'model', parts: [{ text: 'msg2' }] },
       ];
       vi.mocked(mockChat.getHistory).mockReturnValue(history);
-      vi.mocked(uiTelemetryService.getLastPromptTokenCount).mockReturnValue(10);
+      vi.mocked(mockChat.getLastPromptTokenCount).mockReturnValue(10);
       vi.mocked(tokenLimit).mockReturnValue(1000);
 
       const longSummary = 'a'.repeat(1000); // Long summary to inflate token count
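
This final hunk belongs to a test where the mocked summary ('a'.repeat(1000)) costs more tokens than the 10-token history it would replace, i.e. compression would inflate the context. A sketch of the guard such a test appears to target; it uses the CompressionStatus enum imported at the top of the file, but the specific member names beyond NOOP are assumed from context:

```ts
function resolveCompression(
  originalTokenCount: number,
  newTokenCount: number,
): CompressionStatus {
  // A "compressed" history costing at least as much as the one it replaces
  // must be rejected, or compression would grow the very prompt it is
  // supposed to shrink.
  if (newTokenCount >= originalTokenCount) {
    return CompressionStatus.COMPRESSION_FAILED_INFLATED_TOKEN_COUNT;
  }
  return CompressionStatus.COMPRESSED;
}
```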