Show raw input token counts in JSON output. (#15021)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
This commit is contained in:
Jacob Richman
2025-12-15 18:47:39 -08:00
committed by GitHub
parent bb0c0d8ee3
commit 79f664d593
17 changed files with 189 additions and 129 deletions

View File

@@ -21,6 +21,7 @@ describe('calculateErrorRate', () => {
const metrics: ModelMetrics = {
api: { totalRequests: 0, totalErrors: 0, totalLatencyMs: 0 },
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -36,6 +37,7 @@ describe('calculateErrorRate', () => {
const metrics: ModelMetrics = {
api: { totalRequests: 10, totalErrors: 2, totalLatencyMs: 0 },
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -53,6 +55,7 @@ describe('calculateAverageLatency', () => {
const metrics: ModelMetrics = {
api: { totalRequests: 0, totalErrors: 0, totalLatencyMs: 1000 },
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -68,6 +71,7 @@ describe('calculateAverageLatency', () => {
const metrics: ModelMetrics = {
api: { totalRequests: 10, totalErrors: 0, totalLatencyMs: 1500 },
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -85,6 +89,7 @@ describe('calculateCacheHitRate', () => {
const metrics: ModelMetrics = {
api: { totalRequests: 0, totalErrors: 0, totalLatencyMs: 0 },
tokens: {
input: 0,
prompt: 0,
candidates: 0,
total: 0,
@@ -100,6 +105,7 @@ describe('calculateCacheHitRate', () => {
const metrics: ModelMetrics = {
api: { totalRequests: 0, totalErrors: 0, totalLatencyMs: 0 },
tokens: {
input: 150,
prompt: 200,
candidates: 0,
total: 0,
@@ -143,6 +149,7 @@ describe('computeSessionStats', () => {
successRate: 0,
agreementRate: 0,
totalPromptTokens: 0,
totalInputTokens: 0,
totalCachedTokens: 0,
totalLinesAdded: 0,
totalLinesRemoved: 0,
@@ -155,6 +162,7 @@ describe('computeSessionStats', () => {
'gemini-pro': {
api: { totalRequests: 1, totalErrors: 0, totalLatencyMs: 750 },
tokens: {
input: 10,
prompt: 10,
candidates: 10,
total: 20,
@@ -193,6 +201,7 @@ describe('computeSessionStats', () => {
'gemini-pro': {
api: { totalRequests: 2, totalErrors: 0, totalLatencyMs: 1000 },
tokens: {
input: 100,
prompt: 150,
candidates: 10,
total: 160,

View File

@@ -50,6 +50,10 @@ export const computeSessionStats = (
(acc, model) => acc + model.tokens.cached,
0,
);
const totalInputTokens = Object.values(models).reduce(
(acc, model) => acc + model.tokens.input,
0,
);
const totalPromptTokens = Object.values(models).reduce(
(acc, model) => acc + model.tokens.prompt,
0,
@@ -82,6 +86,7 @@ export const computeSessionStats = (
successRate,
agreementRate,
totalCachedTokens,
totalInputTokens,
totalPromptTokens,
totalLinesAdded: files.totalLinesAdded,
totalLinesRemoved: files.totalLinesRemoved,