Show raw input token counts in JSON output. (#15021)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
This commit is contained in:
Jacob Richman
2025-12-15 18:47:39 -08:00
committed by GitHub
parent bb0c0d8ee3
commit 79f664d593
17 changed files with 189 additions and 129 deletions
@@ -173,6 +173,7 @@ describe('UiTelemetryService', () => {
totalLatencyMs: 500,
},
tokens: {
input: 5,
prompt: 10,
candidates: 20,
total: 30,
@@ -227,6 +228,7 @@ describe('UiTelemetryService', () => {
totalLatencyMs: 1100,
},
tokens: {
input: 10,
prompt: 25,
candidates: 45,
total: 70,
@@ -301,6 +303,7 @@ describe('UiTelemetryService', () => {
totalLatencyMs: 300,
},
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -345,6 +348,7 @@ describe('UiTelemetryService', () => {
totalLatencyMs: 800,
},
tokens: {
input: 5,
prompt: 10,
candidates: 20,
total: 30,
@@ -43,6 +43,7 @@ export interface ModelMetrics {
totalLatencyMs: number;
};
tokens: {
input: number;
prompt: number;
candidates: number;
total: number;
@@ -80,6 +81,7 @@ const createInitialModelMetrics = (): ModelMetrics => ({
totalLatencyMs: 0,
},
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -171,6 +173,10 @@ export class UiTelemetryService extends EventEmitter {
modelMetrics.tokens.cached += event.usage.cached_content_token_count;
modelMetrics.tokens.thoughts += event.usage.thoughts_token_count;
modelMetrics.tokens.tool += event.usage.tool_token_count;
modelMetrics.tokens.input = Math.max(
0,
modelMetrics.tokens.prompt - modelMetrics.tokens.cached,
);
}
private processApiError(event: ApiErrorEvent) {