feat(cli): polish cached token stats and simplify stats display when quota is present (#14961)

This commit is contained in:
Jacob Richman
2025-12-11 15:17:14 -08:00
committed by GitHub
parent d818fb1d91
commit 54de67536d
7 changed files with 201 additions and 131 deletions

View File

@@ -116,7 +116,7 @@ describe('<ModelStatsDisplay />', () => {
});
const output = lastFrame();
expect(output).not.toContain('Cached');
expect(output).not.toContain('Cache Reads');
expect(output).not.toContain('Thoughts');
expect(output).not.toContain('Tool');
expect(output).toMatchSnapshot();
@@ -168,7 +168,7 @@ describe('<ModelStatsDisplay />', () => {
});
const output = lastFrame();
expect(output).toContain('Cached');
expect(output).toContain('Cache Reads');
expect(output).toContain('Thoughts');
expect(output).toContain('Tool');
expect(output).toMatchSnapshot();

View File

@@ -160,24 +160,28 @@ export const ModelStatsDisplay: React.FC = () => {
<StatRow
title="Total"
values={getModelValues((m) => (
<Text color={theme.status.warning}>
<Text color={theme.text.secondary}>
{m.tokens.total.toLocaleString()}
</Text>
))}
/>
<StatRow
title="Prompt"
title="Input"
isSubtle
values={getModelValues((m) => m.tokens.prompt.toLocaleString())}
values={getModelValues((m) => (
<Text color={theme.text.primary}>
{Math.max(0, m.tokens.prompt - m.tokens.cached).toLocaleString()}
</Text>
))}
/>
{hasCached && (
<StatRow
title="Cached"
title="Cache Reads"
isSubtle
values={getModelValues((m) => {
const cacheHitRate = calculateCacheHitRate(m);
return (
<Text color={theme.status.success}>
<Text color={theme.text.secondary}>
{m.tokens.cached.toLocaleString()} ({cacheHitRate.toFixed(1)}%)
</Text>
);
@@ -188,20 +192,32 @@ export const ModelStatsDisplay: React.FC = () => {
<StatRow
title="Thoughts"
isSubtle
values={getModelValues((m) => m.tokens.thoughts.toLocaleString())}
values={getModelValues((m) => (
<Text color={theme.text.primary}>
{m.tokens.thoughts.toLocaleString()}
</Text>
))}
/>
)}
{hasTool && (
<StatRow
title="Tool"
isSubtle
values={getModelValues((m) => m.tokens.tool.toLocaleString())}
values={getModelValues((m) => (
<Text color={theme.text.primary}>
{m.tokens.tool.toLocaleString()}
</Text>
))}
/>
)}
<StatRow
title="Output"
isSubtle
values={getModelValues((m) => m.tokens.candidates.toLocaleString())}
values={getModelValues((m) => (
<Text color={theme.text.primary}>
{m.tokens.candidates.toLocaleString()}
</Text>
))}
/>
</Box>
);

View File

@@ -76,7 +76,6 @@ describe('<StatsDisplay />', () => {
expect(output).toContain('Performance');
expect(output).toContain('Interaction Summary');
expect(output).not.toContain('Efficiency & Optimizations');
expect(output).toMatchSnapshot();
});
@@ -113,8 +112,8 @@ describe('<StatsDisplay />', () => {
expect(output).toContain('gemini-2.5-pro');
expect(output).toContain('gemini-2.5-flash');
expect(output).toContain('1,000');
expect(output).toContain('25,000');
expect(output).toContain('15,000');
expect(output).toContain('10,000');
expect(output).toMatchSnapshot();
});
@@ -167,7 +166,6 @@ describe('<StatsDisplay />', () => {
expect(output).toContain('Performance');
expect(output).toContain('Interaction Summary');
expect(output).toContain('User Agreement');
expect(output).toContain('Savings Highlight');
expect(output).toContain('gemini-2.5-pro');
expect(output).toMatchSnapshot();
});
@@ -232,7 +230,6 @@ describe('<StatsDisplay />', () => {
const { lastFrame } = renderWithMockedStats(metrics);
const output = lastFrame();
expect(output).not.toContain('Efficiency & Optimizations');
expect(output).toMatchSnapshot();
});
});
@@ -442,7 +439,7 @@ describe('<StatsDisplay />', () => {
);
const output = lastFrame();
expect(output).toContain('Usage limit remaining');
expect(output).toContain('Usage left');
expect(output).toContain('75.0%');
expect(output).toContain('(Resets in 1h 30m)');
expect(output).toMatchSnapshot();

View File

@@ -17,6 +17,8 @@ import {
TOOL_SUCCESS_RATE_MEDIUM,
USER_AGREEMENT_RATE_HIGH,
USER_AGREEMENT_RATE_MEDIUM,
CACHE_EFFICIENCY_HIGH,
CACHE_EFFICIENCY_MEDIUM,
} from '../utils/displayUtils.js';
import { computeSessionStats } from '../utils/computeStats.js';
import {
@@ -36,8 +38,7 @@ const StatRow: React.FC<StatRowProps> = ({ title, children }) => (
<Box width={28}>
<Text color={theme.text.link}>{title}</Text>
</Box>
{/* FIX: Wrap children in a Box that can grow to fill remaining space */}
<Box flexGrow={1}>{children}</Box>
{children}
</Box>
);
@@ -53,8 +54,7 @@ const SubStatRow: React.FC<SubStatRowProps> = ({ title, children }) => (
<Box width={26}>
<Text color={theme.text.secondary}>» {title}</Text>
</Box>
{/* FIX: Apply the same flexGrow fix here */}
<Box flexGrow={1}>{children}</Box>
{children}
</Box>
);
@@ -84,11 +84,16 @@ const buildModelRows = (
// 1. Models with active usage
const activeRows = Object.entries(models).map(([name, metrics]) => {
const modelName = getBaseModelName(name);
const cachedTokens = metrics.tokens.cached;
const totalInputTokens = metrics.tokens.prompt;
const uncachedTokens = Math.max(0, totalInputTokens - cachedTokens);
return {
key: name,
modelName,
requests: metrics.api.totalRequests,
inputTokens: metrics.tokens.prompt.toLocaleString(),
cachedTokens: cachedTokens.toLocaleString(),
uncachedTokens: uncachedTokens.toLocaleString(),
totalInputTokens: totalInputTokens.toLocaleString(),
outputTokens: metrics.tokens.candidates.toLocaleString(),
bucket: quotas?.buckets?.find((b) => b.modelId === modelName),
isActive: true,
@@ -108,7 +113,9 @@ const buildModelRows = (
key: bucket.modelId!,
modelName: bucket.modelId!,
requests: '-',
inputTokens: '-',
cachedTokens: '-',
uncachedTokens: '-',
totalInputTokens: '-',
outputTokens: '-',
bucket,
isActive: false,
@@ -143,50 +150,91 @@ const formatResetTime = (resetTime: string): string => {
const ModelUsageTable: React.FC<{
models: Record<string, ModelMetrics>;
totalCachedTokens: number;
cacheEfficiency: number;
quotas?: RetrieveUserQuotaResponse;
}> = ({ models, totalCachedTokens, cacheEfficiency, quotas }) => {
cacheEfficiency: number;
totalCachedTokens: number;
}> = ({ models, quotas, cacheEfficiency, totalCachedTokens }) => {
const rows = buildModelRows(models, quotas);
if (rows.length === 0) {
return null;
}
const showQuotaColumn = !!quotas && rows.some((row) => !!row.bucket);
const nameWidth = 25;
const requestsWidth = 8;
const inputTokensWidth = 15;
const requestsWidth = 7;
const uncachedWidth = 15;
const cachedWidth = 14;
const outputTokensWidth = 15;
const usageLimitWidth = quotas ? 30 : 0;
const usageLimitWidth = showQuotaColumn ? 28 : 0;
const cacheEfficiencyColor = getStatusColor(cacheEfficiency, {
green: CACHE_EFFICIENCY_HIGH,
yellow: CACHE_EFFICIENCY_MEDIUM,
});
return (
<Box flexDirection="column" marginTop={1}>
{/* Header */}
<Box>
<Box width={nameWidth}>
<Text bold color={theme.text.primary}>
<Box alignItems="flex-end">
<Box width={nameWidth} flexGrow={1}>
<Text bold color={theme.text.primary} wrap="truncate-end">
Model Usage
</Text>
</Box>
<Box width={requestsWidth} justifyContent="flex-end">
<Box
width={requestsWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text bold color={theme.text.primary}>
Reqs
</Text>
</Box>
<Box width={inputTokensWidth} justifyContent="flex-end">
<Text bold color={theme.text.primary}>
Input Tokens
</Text>
</Box>
<Box width={outputTokensWidth} justifyContent="flex-end">
<Text bold color={theme.text.primary}>
Output Tokens
</Text>
</Box>
{quotas && (
<Box width={usageLimitWidth} justifyContent="flex-end">
{!showQuotaColumn && (
<>
<Box
width={uncachedWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text bold color={theme.text.primary}>
Input Tokens
</Text>
</Box>
<Box
width={cachedWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text bold color={theme.text.primary}>
Cache Reads
</Text>
</Box>
<Box
width={outputTokensWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text bold color={theme.text.primary}>
Output Tokens
</Text>
</Box>
</>
)}
{showQuotaColumn && (
<Box
width={usageLimitWidth}
flexDirection="column"
alignItems="flex-end"
>
<Text bold color={theme.text.primary}>
Usage limit remaining
Usage left
</Text>
</Box>
)}
@@ -200,46 +248,77 @@ const ModelUsageTable: React.FC<{
borderLeft={false}
borderRight={false}
borderColor={theme.border.default}
width={
nameWidth +
requestsWidth +
inputTokensWidth +
outputTokensWidth +
usageLimitWidth
}
width="100%"
></Box>
{rows.map((row) => (
<Box key={row.key}>
<Box width={nameWidth}>
<Text color={theme.text.primary}>{row.modelName}</Text>
<Box width={nameWidth} flexGrow={1}>
<Text color={theme.text.primary} wrap="truncate-end">
{row.modelName}
</Text>
</Box>
<Box width={requestsWidth} justifyContent="flex-end">
<Box
width={requestsWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text
color={row.isActive ? theme.text.primary : theme.text.secondary}
>
{row.requests}
</Text>
</Box>
<Box width={inputTokensWidth} justifyContent="flex-end">
<Text
color={row.isActive ? theme.status.warning : theme.text.secondary}
>
{row.inputTokens}
</Text>
</Box>
<Box width={outputTokensWidth} justifyContent="flex-end">
<Text
color={row.isActive ? theme.status.warning : theme.text.secondary}
>
{row.outputTokens}
</Text>
</Box>
<Box width={usageLimitWidth} justifyContent="flex-end">
{!showQuotaColumn && (
<>
<Box
width={uncachedWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text
color={
row.isActive ? theme.text.primary : theme.text.secondary
}
>
{row.uncachedTokens}
</Text>
</Box>
<Box
width={cachedWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text color={theme.text.secondary}>{row.cachedTokens}</Text>
</Box>
<Box
width={outputTokensWidth}
flexDirection="column"
alignItems="flex-end"
flexShrink={0}
>
<Text
color={
row.isActive ? theme.text.primary : theme.text.secondary
}
>
{row.outputTokens}
</Text>
</Box>
</>
)}
<Box
width={usageLimitWidth}
flexDirection="column"
alignItems="flex-end"
>
{row.bucket &&
row.bucket.remainingFraction != null &&
row.bucket.resetTime && (
<Text color={theme.text.secondary}>
<Text color={theme.text.secondary} wrap="truncate-end">
{(row.bucket.remainingFraction * 100).toFixed(1)}%{' '}
{formatResetTime(row.bucket.resetTime)}
</Text>
@@ -248,17 +327,20 @@ const ModelUsageTable: React.FC<{
</Box>
))}
{cacheEfficiency > 0 && (
{cacheEfficiency > 0 && !showQuotaColumn && (
<Box flexDirection="column" marginTop={1}>
<Text color={theme.text.primary}>
<Text color={theme.status.success}>Savings Highlight:</Text>{' '}
{totalCachedTokens.toLocaleString()} ({cacheEfficiency.toFixed(1)}
%) of input tokens were served from the cache, reducing costs.
{totalCachedTokens.toLocaleString()} (
<Text color={cacheEfficiencyColor}>
{cacheEfficiency.toFixed(1)}%
</Text>
) of input tokens were served from the cache, reducing costs.
</Text>
</Box>
)}
{models && (
{showQuotaColumn && (
<>
<Box marginTop={1} marginBottom={2}>
<Text color={theme.text.primary}>
@@ -322,6 +404,7 @@ export const StatsDisplay: React.FC<StatsDisplayProps> = ({
flexDirection="column"
paddingY={1}
paddingX={2}
overflow="hidden"
>
{renderTitle()}
<Box height={1} />
@@ -393,9 +476,9 @@ export const StatsDisplay: React.FC<StatsDisplayProps> = ({
</Section>
<ModelUsageTable
models={models}
totalCachedTokens={computed.totalCachedTokens}
cacheEfficiency={computed.cacheEfficiency}
quotas={quotas}
cacheEfficiency={computed.cacheEfficiency}
totalCachedTokens={computed.totalCachedTokens}
/>
</Box>
);

View File

@@ -14,8 +14,8 @@ exports[`<ModelStatsDisplay /> > should display a single model correctly 1`] = `
│ │
│ Tokens │
│ Total 30 │
│ ↳ Prompt 10
│ ↳ Cached 5 (50.0%) │
│ ↳ Input 5
│ ↳ Cache Reads 5 (50.0%) │
│ ↳ Thoughts 2 │
│ ↳ Tool 1 │
│ ↳ Output 20 │
@@ -37,8 +37,8 @@ exports[`<ModelStatsDisplay /> > should display conditional rows if at least one
│ │
│ Tokens │
│ Total 30 15 │
│ ↳ Prompt 10 5 │
│ ↳ Cached 5 (50.0%) 0 (0.0%) │
│ ↳ Input 5 5 │
│ ↳ Cache Reads 5 (50.0%) 0 (0.0%) │
│ ↳ Thoughts 2 0 │
│ ↳ Tool 0 3 │
│ ↳ Output 20 10 │
@@ -60,8 +60,8 @@ exports[`<ModelStatsDisplay /> > should display stats for multiple models correc
│ │
│ Tokens │
│ Total 300 600 │
│ ↳ Prompt 100 200 │
│ ↳ Cached 50 (50.0%) 100 (50.0%) │
│ ↳ Input 50 100 │
│ ↳ Cache Reads 50 (50.0%) 100 (50.0%) │
│ ↳ Thoughts 10 20 │
│ ↳ Tool 5 10 │
│ ↳ Output 200 400 │
@@ -83,8 +83,8 @@ exports[`<ModelStatsDisplay /> > should handle large values without wrapping or
│ │
│ Tokens │
│ Total 999,999,999 │
│ ↳ Prompt 987,654,321
│ ↳ Cached 123,456,789 (12.5%) │
│ ↳ Input 864,197,532 │
│ ↳ Cache Reads 123,456,789 (12.5%) │
│ ↳ Thoughts 111,111,111 │
│ ↳ Tool 222,222,222 │
│ ↳ Output 123,456,789 │
@@ -106,7 +106,7 @@ exports[`<ModelStatsDisplay /> > should not display conditional rows if no model
│ │
│ Tokens │
│ Total 30 │
│ ↳ Prompt 10 │
│ ↳ Input 10 │
│ ↳ Output 20 │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"

View File

@@ -18,17 +18,11 @@ exports[`<SessionSummaryDisplay /> > renders the summary display with a title 1`
│ » Tool Time: 0s (0.0%) │
│ │
│ │
│ Model Usage Reqs Input Tokens Output Tokens
│ ───────────────────────────────────────────────────────────────
│ gemini-2.5-pro 10 1,000 2,000
│ Model Usage Reqs Input Tokens Cache Reads Output Tokens │
│ ──────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-pro 10 500 500 2,000
│ │
│ Savings Highlight: 500 (50.0%) of input tokens were served from the cache, reducing costs. │
│ │
│ Usage limits span all sessions and reset daily. │
│ /auth to upgrade or switch to API key. │
│ │
│ │
│ » Tip: For a full token breakdown, run \`/stats model\`. │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;

View File

@@ -118,15 +118,9 @@ exports[`<StatsDisplay /> > Conditional Rendering Tests > hides Efficiency secti
│ » Tool Time: 0s (0.0%) │
│ │
│ │
│ Model Usage Reqs Input Tokens Output Tokens
│ ───────────────────────────────────────────────────────────────
│ gemini-2.5-pro 1 100 100
│ │
│ Usage limits span all sessions and reset daily. │
│ /auth to upgrade or switch to API key. │
│ │
│ │
│ » Tip: For a full token breakdown, run \`/stats model\`. │
│ Model Usage Reqs Input Tokens Cache Reads Output Tokens │
│ ──────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-pro 1 100 0 100
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;
@@ -168,9 +162,9 @@ exports[`<StatsDisplay /> > Quota Display > renders quota information for unused
│ » Tool Time: 0s (0.0%) │
│ │
│ │
│ Model Usage Reqs Input Tokens Output Tokens Usage limit remaining
│ ─────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-flash - - - 50.0% (Resets in 2h)
│ Model Usage Reqs Usage left
│ ─────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-flash - 50.0% (Resets in 2h) │
│ │
│ Usage limits span all sessions and reset daily. │
│ /auth to upgrade or switch to API key. │
@@ -198,11 +192,9 @@ exports[`<StatsDisplay /> > Quota Display > renders quota information when quota
│ » Tool Time: 0s (0.0%) │
│ │
│ │
│ Model Usage Reqs Input Tokens Output Tokens Usage limit remaining
│ ─────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-pro 1 100 100 75.0% (Resets in 1h 30m)
│ │
│ Savings Highlight: 50 (50.0%) of input tokens were served from the cache, reducing costs. │
│ Model Usage Reqs Usage left
│ ─────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-pro 1 75.0% (Resets in 1h 30m) │
│ │
│ Usage limits span all sessions and reset daily. │
│ /auth to upgrade or switch to API key. │
@@ -270,19 +262,13 @@ exports[`<StatsDisplay /> > renders a table with two models correctly 1`] = `
│ » Tool Time: 0s (0.0%) │
│ │
│ │
│ Model Usage Reqs Input Tokens Output Tokens
│ ───────────────────────────────────────────────────────────────
│ gemini-2.5-pro 3 1,000 2,000
│ gemini-2.5-flash 5 25,000 15,000
│ Model Usage Reqs Input Tokens Cache Reads Output Tokens │
│ ──────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-pro 3 500 500 2,000
│ gemini-2.5-flash 5 15,000 10,000 15,000
│ │
│ Savings Highlight: 10,500 (40.4%) of input tokens were served from the cache, reducing costs. │
│ │
│ Usage limits span all sessions and reset daily. │
│ /auth to upgrade or switch to API key. │
│ │
│ │
│ » Tip: For a full token breakdown, run \`/stats model\`. │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;
@@ -304,18 +290,12 @@ exports[`<StatsDisplay /> > renders all sections when all data is present 1`] =
│ » Tool Time: 123ms (55.2%) │
│ │
│ │
│ Model Usage Reqs Input Tokens Output Tokens
│ ───────────────────────────────────────────────────────────────
│ gemini-2.5-pro 1 100 100
│ Model Usage Reqs Input Tokens Cache Reads Output Tokens │
│ ──────────────────────────────────────────────────────────────────────────────────────────────
│ gemini-2.5-pro 1 50 50 100
│ │
│ Savings Highlight: 50 (50.0%) of input tokens were served from the cache, reducing costs. │
│ │
│ Usage limits span all sessions and reset daily. │
│ /auth to upgrade or switch to API key. │
│ │
│ │
│ » Tip: For a full token breakdown, run \`/stats model\`. │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;