diff --git a/packages/cli/src/ui/components/ModelStatsDisplay.test.tsx b/packages/cli/src/ui/components/ModelStatsDisplay.test.tsx
index 76e9d19edf..fed978ea25 100644
--- a/packages/cli/src/ui/components/ModelStatsDisplay.test.tsx
+++ b/packages/cli/src/ui/components/ModelStatsDisplay.test.tsx
@@ -11,7 +11,7 @@ import * as SessionContext from '../contexts/SessionContext.js';
import * as SettingsContext from '../contexts/SettingsContext.js';
import type { LoadedSettings } from '../../config/settings.js';
import type { SessionMetrics } from '../contexts/SessionContext.js';
-import { ToolCallDecision } from '@google/gemini-cli-core';
+import { ToolCallDecision, LlmRole } from '@google/gemini-cli-core';
// Mock the context to provide controlled data for testing
vi.mock('../contexts/SessionContext.js', async (importOriginal) => {
@@ -118,6 +118,7 @@ describe('', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
tools: {
@@ -160,6 +161,7 @@ describe('', () => {
thoughts: 2,
tool: 0,
},
+ roles: {},
},
'gemini-2.5-flash': {
api: { totalRequests: 1, totalErrors: 0, totalLatencyMs: 50 },
@@ -172,6 +174,7 @@ describe('', () => {
thoughts: 0,
tool: 3,
},
+ roles: {},
},
},
tools: {
@@ -214,6 +217,7 @@ describe('', () => {
thoughts: 10,
tool: 5,
},
+ roles: {},
},
'gemini-2.5-flash': {
api: { totalRequests: 20, totalErrors: 2, totalLatencyMs: 500 },
@@ -226,6 +230,7 @@ describe('', () => {
thoughts: 20,
tool: 10,
},
+ roles: {},
},
},
tools: {
@@ -271,6 +276,7 @@ describe('', () => {
thoughts: 111111111,
tool: 222222222,
},
+ roles: {},
},
},
tools: {
@@ -309,6 +315,7 @@ describe('', () => {
thoughts: 2,
tool: 1,
},
+ roles: {},
},
},
tools: {
@@ -351,6 +358,7 @@ describe('', () => {
thoughts: 100,
tool: 50,
},
+ roles: {},
},
'gemini-3-flash-preview': {
api: { totalRequests: 20, totalErrors: 0, totalLatencyMs: 1000 },
@@ -363,6 +371,7 @@ describe('', () => {
thoughts: 200,
tool: 100,
},
+ roles: {},
},
},
tools: {
@@ -390,6 +399,64 @@ describe('', () => {
const output = lastFrame();
expect(output).toContain('gemini-3-pro-');
expect(output).toContain('gemini-3-flash-');
+ });
+
+ it('should display role breakdown correctly', () => {
+ const { lastFrame } = renderWithMockedStats({
+ models: {
+ 'gemini-2.5-pro': {
+ api: { totalRequests: 2, totalErrors: 0, totalLatencyMs: 200 },
+ tokens: {
+ input: 20,
+ prompt: 30,
+ candidates: 40,
+ total: 70,
+ cached: 10,
+ thoughts: 0,
+ tool: 0,
+ },
+ roles: {
+ [LlmRole.MAIN]: {
+ totalRequests: 1,
+ totalErrors: 0,
+ totalLatencyMs: 100,
+ tokens: {
+ input: 10,
+ prompt: 15,
+ candidates: 20,
+ total: 35,
+ cached: 5,
+ thoughts: 0,
+ tool: 0,
+ },
+ },
+ },
+ },
+ },
+ tools: {
+ totalCalls: 0,
+ totalSuccess: 0,
+ totalFail: 0,
+ totalDurationMs: 0,
+ totalDecisions: {
+ accept: 0,
+ reject: 0,
+ modify: 0,
+ [ToolCallDecision.AUTO_ACCEPT]: 0,
+ },
+ byName: {},
+ },
+ files: {
+ totalLinesAdded: 0,
+ totalLinesRemoved: 0,
+ },
+ });
+
+ const output = lastFrame();
+ expect(output).toContain('main');
+ expect(output).toContain('Input');
+ expect(output).toContain('Output');
+ expect(output).toContain('Cache Reads');
expect(output).toMatchSnapshot();
});
@@ -427,6 +494,7 @@ describe('', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
tools: {
@@ -462,4 +530,121 @@ describe('', () => {
expect(output).toContain('Tier:');
expect(output).toContain('Pro');
});
+
+ it('should handle long role name layout', () => {
+ // Use the longest valid role name to test layout
+ const longRoleName = LlmRole.UTILITY_LOOP_DETECTOR;
+
+ const { lastFrame } = renderWithMockedStats({
+ models: {
+ 'gemini-2.5-pro': {
+ api: { totalRequests: 1, totalErrors: 0, totalLatencyMs: 100 },
+ tokens: {
+ input: 10,
+ prompt: 10,
+ candidates: 20,
+ total: 30,
+ cached: 0,
+ thoughts: 0,
+ tool: 0,
+ },
+ roles: {
+ [longRoleName]: {
+ totalRequests: 1,
+ totalErrors: 0,
+ totalLatencyMs: 100,
+ tokens: {
+ input: 10,
+ prompt: 10,
+ candidates: 20,
+ total: 30,
+ cached: 0,
+ thoughts: 0,
+ tool: 0,
+ },
+ },
+ },
+ },
+ },
+ tools: {
+ totalCalls: 0,
+ totalSuccess: 0,
+ totalFail: 0,
+ totalDurationMs: 0,
+ totalDecisions: {
+ accept: 0,
+ reject: 0,
+ modify: 0,
+ [ToolCallDecision.AUTO_ACCEPT]: 0,
+ },
+ byName: {},
+ },
+ files: {
+ totalLinesAdded: 0,
+ totalLinesRemoved: 0,
+ },
+ });
+
+ const output = lastFrame();
+ expect(output).toContain(longRoleName);
+ expect(output).toMatchSnapshot();
+ });
+
+ it('should filter out invalid role names', () => {
+ const invalidRoleName =
+ 'this_is_a_very_long_role_name_that_should_be_wrapped' as LlmRole;
+ const { lastFrame } = renderWithMockedStats({
+ models: {
+ 'gemini-2.5-pro': {
+ api: { totalRequests: 1, totalErrors: 0, totalLatencyMs: 100 },
+ tokens: {
+ input: 10,
+ prompt: 10,
+ candidates: 20,
+ total: 30,
+ cached: 0,
+ thoughts: 0,
+ tool: 0,
+ },
+ roles: {
+ [invalidRoleName]: {
+ totalRequests: 1,
+ totalErrors: 0,
+ totalLatencyMs: 100,
+ tokens: {
+ input: 10,
+ prompt: 10,
+ candidates: 20,
+ total: 30,
+ cached: 0,
+ thoughts: 0,
+ tool: 0,
+ },
+ },
+ },
+ },
+ },
+ tools: {
+ totalCalls: 0,
+ totalSuccess: 0,
+ totalFail: 0,
+ totalDurationMs: 0,
+ totalDecisions: {
+ accept: 0,
+ reject: 0,
+ modify: 0,
+ [ToolCallDecision.AUTO_ACCEPT]: 0,
+ },
+ byName: {},
+ },
+ files: {
+ totalLinesAdded: 0,
+ totalLinesRemoved: 0,
+ },
+ });
+
+ const output = lastFrame();
+ expect(output).not.toContain(invalidRoleName);
+ expect(output).toMatchSnapshot();
+ });
});
diff --git a/packages/cli/src/ui/components/ModelStatsDisplay.tsx b/packages/cli/src/ui/components/ModelStatsDisplay.tsx
index 085d23a524..eec58e9968 100644
--- a/packages/cli/src/ui/components/ModelStatsDisplay.tsx
+++ b/packages/cli/src/ui/components/ModelStatsDisplay.tsx
@@ -13,10 +13,17 @@ import {
calculateCacheHitRate,
calculateErrorRate,
} from '../utils/computeStats.js';
-import { useSessionStats } from '../contexts/SessionContext.js';
+import {
+ useSessionStats,
+ type ModelMetrics,
+} from '../contexts/SessionContext.js';
import { Table, type Column } from './Table.js';
import { useSettings } from '../contexts/SettingsContext.js';
-import { getDisplayString, isAutoModel } from '@google/gemini-cli-core';
+import {
+ getDisplayString,
+ isAutoModel,
+ LlmRole,
+} from '@google/gemini-cli-core';
import type { QuotaStats } from '../types.js';
import { QuotaStatsInfo } from './QuotaStatsInfo.js';
@@ -25,9 +32,11 @@ interface StatRowData {
isSection?: boolean;
isSubtle?: boolean;
// Dynamic keys for model values
- [key: string]: string | React.ReactNode | boolean | undefined;
+ [key: string]: string | React.ReactNode | boolean | undefined | number;
}
+type RoleMetrics = NonNullable<NonNullable<ModelMetrics['roles']>[LlmRole]>;
+
interface ModelStatsDisplayProps {
selectedAuthType?: string;
userEmail?: string;
@@ -81,6 +90,22 @@ export const ModelStatsDisplay: React.FC = ({
([, metrics]) => metrics.tokens.cached > 0,
);
+ const allRoles = [
+ ...new Set(
+ activeModels.flatMap(([, metrics]) => Object.keys(metrics.roles ?? {})),
+ ),
+ ]
+ .filter((role): role is LlmRole => {
+ const validRoles: string[] = Object.values(LlmRole);
+ return validRoles.includes(role);
+ })
+ .sort((a, b) => {
+ if (a === b) return 0;
+ if (a === LlmRole.MAIN) return -1;
+ if (b === LlmRole.MAIN) return 1;
+ return a.localeCompare(b);
+ });
+
// Helper to create a row with values for each model
const createRow = (
metric: string,
@@ -204,6 +229,60 @@ export const ModelStatsDisplay: React.FC = ({
),
);
+ // Roles Section
+ if (allRoles.length > 0) {
+ // Spacer
+ rows.push({ metric: '' });
+ rows.push({ metric: 'Roles', isSection: true });
+
+ allRoles.forEach((role) => {
+ // Role Header Row
+ const roleHeaderRow: StatRowData = {
+ metric: role,
+ isSection: true,
+ color: theme.text.primary,
+ };
+ // We don't populate model values for the role header row
+ rows.push(roleHeaderRow);
+
+ const addRoleMetric = (
+ metric: string,
+ getValue: (r: RoleMetrics) => string | React.ReactNode,
+ ) => {
+ const row: StatRowData = {
+ metric,
+ isSubtle: true,
+ };
+ activeModels.forEach(([name, metrics]) => {
+ const roleMetrics = metrics.roles?.[role];
+ if (roleMetrics) {
+ row[name] = getValue(roleMetrics);
+ } else {
+ row[name] = <Text>-</Text>;
+ }
+ });
+ rows.push(row);
+ };
+
+ addRoleMetric('Requests', (r) => r.totalRequests.toLocaleString());
+ addRoleMetric('Input', (r) => (
+ <Text>
+ {r.tokens.input.toLocaleString()}
+ </Text>
+ ));
+ addRoleMetric('Output', (r) => (
+ <Text>
+ {r.tokens.candidates.toLocaleString()}
+ </Text>
+ ));
+ addRoleMetric('Cache Reads', (r) => (
+ <Text>
+ {r.tokens.cached.toLocaleString()}
+ </Text>
+ ));
+ });
+ }
+
const columns: Array<Column<StatRowData>> = [
{
key: 'metric',
diff --git a/packages/cli/src/ui/components/SessionSummaryDisplay.test.tsx b/packages/cli/src/ui/components/SessionSummaryDisplay.test.tsx
index f878cc35c3..27a1e61c24 100644
--- a/packages/cli/src/ui/components/SessionSummaryDisplay.test.tsx
+++ b/packages/cli/src/ui/components/SessionSummaryDisplay.test.tsx
@@ -55,6 +55,7 @@ describe('', () => {
thoughts: 300,
tool: 200,
},
+ roles: {},
},
},
tools: {
diff --git a/packages/cli/src/ui/components/StatsDisplay.test.tsx b/packages/cli/src/ui/components/StatsDisplay.test.tsx
index 21bf60fba9..54da9f2f9f 100644
--- a/packages/cli/src/ui/components/StatsDisplay.test.tsx
+++ b/packages/cli/src/ui/components/StatsDisplay.test.tsx
@@ -93,6 +93,7 @@ describe('', () => {
thoughts: 100,
tool: 50,
},
+ roles: {},
},
'gemini-2.5-flash': {
api: { totalRequests: 5, totalErrors: 1, totalLatencyMs: 4500 },
@@ -105,6 +106,7 @@ describe('', () => {
thoughts: 2000,
tool: 1000,
},
+ roles: {},
},
},
});
@@ -133,6 +135,7 @@ describe('', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
tools: {
@@ -227,6 +230,7 @@ describe('', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
});
@@ -411,6 +415,7 @@ describe('', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
});
diff --git a/packages/cli/src/ui/components/__snapshots__/ModelStatsDisplay.test.tsx.snap b/packages/cli/src/ui/components/__snapshots__/ModelStatsDisplay.test.tsx.snap
index f7b773ef90..b987b709e7 100644
--- a/packages/cli/src/ui/components/__snapshots__/ModelStatsDisplay.test.tsx.snap
+++ b/packages/cli/src/ui/components/__snapshots__/ModelStatsDisplay.test.tsx.snap
@@ -44,6 +44,32 @@ exports[` > should display conditional rows if at least one
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;
+exports[`<ModelStatsDisplay /> > should display role breakdown correctly 1`] = `
+"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ │
+│ Model Stats For Nerds │
+│ │
+│ │
+│ Metric gemini-2.5-pro │
+│ ────────────────────────────────────────────────────────────────────────────────────────────── │
+│ API │
+│ Requests 2 │
+│ Errors 0 (0.0%) │
+│ Avg Latency 100ms │
+│ Tokens │
+│ Total 70 │
+│ ↳ Input 20 │
+│ ↳ Cache Reads 10 (33.3%) │
+│ ↳ Output 40 │
+│ Roles │
+│ main │
+│ ↳ Requests 1 │
+│ ↳ Input 10 │
+│ ↳ Output 20 │
+│ ↳ Cache Reads 5 │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
+`;
+
exports[`<ModelStatsDisplay /> > should display stats for multiple models correctly 1`] = `
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
@@ -66,6 +92,25 @@ exports[` > should display stats for multiple models correc
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;
+exports[`<ModelStatsDisplay /> > should filter out invalid role names 1`] = `
+"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ │
+│ Model Stats For Nerds │
+│ │
+│ │
+│ Metric gemini-2.5-pro │
+│ ────────────────────────────────────────────────────────────────────────────────────────────── │
+│ API │
+│ Requests 1 │
+│ Errors 0 (0.0%) │
+│ Avg Latency 100ms │
+│ Tokens │
+│ Total 30 │
+│ ↳ Input 10 │
+│ ↳ Output 20 │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
+`;
+
exports[`<ModelStatsDisplay /> > should handle large values without wrapping or overlapping 1`] = `
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
@@ -88,6 +133,31 @@ exports[` > should handle large values without wrapping or
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
`;
+exports[`<ModelStatsDisplay /> > should handle long role name layout 1`] = `
+"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ │
+│ Model Stats For Nerds │
+│ │
+│ │
+│ Metric gemini-2.5-pro │
+│ ────────────────────────────────────────────────────────────────────────────────────────────── │
+│ API │
+│ Requests 1 │
+│ Errors 0 (0.0%) │
+│ Avg Latency 100ms │
+│ Tokens │
+│ Total 30 │
+│ ↳ Input 10 │
+│ ↳ Output 20 │
+│ Roles │
+│ utility_loop_detector │
+│ ↳ Requests 1 │
+│ ↳ Input 10 │
+│ ↳ Output 20 │
+│ ↳ Cache Reads 0 │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
+`;
+
exports[`<ModelStatsDisplay /> > should handle models with long names (gemini-3-*-preview) without layout breaking 1`] = `
"╭──────────────────────────────────────────────────────────────────────────────╮
│ │
diff --git a/packages/cli/src/ui/components/triage/TriageDuplicates.tsx b/packages/cli/src/ui/components/triage/TriageDuplicates.tsx
index a79fbb2eb1..abc749b6d3 100644
--- a/packages/cli/src/ui/components/triage/TriageDuplicates.tsx
+++ b/packages/cli/src/ui/components/triage/TriageDuplicates.tsx
@@ -8,7 +8,7 @@ import { useState, useEffect, useCallback } from 'react';
import { Box, Text } from 'ink';
import Spinner from 'ink-spinner';
import type { Config } from '@google/gemini-cli-core';
-import { debugLogger, spawnAsync } from '@google/gemini-cli-core';
+import { debugLogger, spawnAsync, LlmRole } from '@google/gemini-cli-core';
import { useKeypress } from '../../hooks/useKeypress.js';
import { keyMatchers, Command } from '../../keyMatchers.js';
@@ -279,6 +279,7 @@ Return a JSON object with:
},
abortSignal: new AbortController().signal,
promptId: 'triage-duplicates',
+ role: LlmRole.UTILITY_TOOL,
});
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion
diff --git a/packages/cli/src/ui/components/triage/TriageIssues.tsx b/packages/cli/src/ui/components/triage/TriageIssues.tsx
index 01322440ae..3a654a40de 100644
--- a/packages/cli/src/ui/components/triage/TriageIssues.tsx
+++ b/packages/cli/src/ui/components/triage/TriageIssues.tsx
@@ -8,7 +8,7 @@ import { useState, useEffect, useCallback, useRef } from 'react';
import { Box, Text } from 'ink';
import Spinner from 'ink-spinner';
import type { Config } from '@google/gemini-cli-core';
-import { debugLogger, spawnAsync } from '@google/gemini-cli-core';
+import { debugLogger, spawnAsync, LlmRole } from '@google/gemini-cli-core';
import { useKeypress } from '../../hooks/useKeypress.js';
import { keyMatchers, Command } from '../../keyMatchers.js';
import { TextInput } from '../shared/TextInput.js';
@@ -223,6 +223,7 @@ Return a JSON object with:
},
abortSignal: abortControllerRef.current.signal,
promptId: 'triage-issues',
+ role: LlmRole.UTILITY_TOOL,
});
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion
diff --git a/packages/cli/src/ui/contexts/SessionContext.test.tsx b/packages/cli/src/ui/contexts/SessionContext.test.tsx
index 5ab0204255..5ab76e4519 100644
--- a/packages/cli/src/ui/contexts/SessionContext.test.tsx
+++ b/packages/cli/src/ui/contexts/SessionContext.test.tsx
@@ -100,6 +100,7 @@ describe('SessionStatsContext', () => {
thoughts: 20,
tool: 10,
},
+ roles: {},
},
},
tools: {
@@ -180,6 +181,7 @@ describe('SessionStatsContext', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
tools: {
diff --git a/packages/cli/src/ui/hooks/usePromptCompletion.ts b/packages/cli/src/ui/hooks/usePromptCompletion.ts
index 1079095a82..f359b27b2b 100644
--- a/packages/cli/src/ui/hooks/usePromptCompletion.ts
+++ b/packages/cli/src/ui/hooks/usePromptCompletion.ts
@@ -6,7 +6,7 @@
import { useState, useCallback, useRef, useEffect, useMemo } from 'react';
import type { Config } from '@google/gemini-cli-core';
-import { debugLogger, getResponseText } from '@google/gemini-cli-core';
+import { debugLogger, getResponseText, LlmRole } from '@google/gemini-cli-core';
import type { Content } from '@google/genai';
import type { TextBuffer } from '../components/shared/text-buffer.js';
import { isSlashCommand } from '../utils/commandUtils.js';
@@ -110,6 +110,7 @@ export function usePromptCompletion({
{ model: 'prompt-completion' },
contents,
signal,
+ LlmRole.UTILITY_AUTOCOMPLETE,
);
if (signal.aborted) {
diff --git a/packages/cli/src/ui/utils/computeStats.test.ts b/packages/cli/src/ui/utils/computeStats.test.ts
index b3677164a7..09baec304f 100644
--- a/packages/cli/src/ui/utils/computeStats.test.ts
+++ b/packages/cli/src/ui/utils/computeStats.test.ts
@@ -29,6 +29,7 @@ describe('calculateErrorRate', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
expect(calculateErrorRate(metrics)).toBe(0);
});
@@ -45,6 +46,7 @@ describe('calculateErrorRate', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
expect(calculateErrorRate(metrics)).toBe(20);
});
@@ -63,6 +65,7 @@ describe('calculateAverageLatency', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
expect(calculateAverageLatency(metrics)).toBe(0);
});
@@ -79,6 +82,7 @@ describe('calculateAverageLatency', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
expect(calculateAverageLatency(metrics)).toBe(150);
});
@@ -97,6 +101,7 @@ describe('calculateCacheHitRate', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
expect(calculateCacheHitRate(metrics)).toBe(0);
});
@@ -113,6 +118,7 @@ describe('calculateCacheHitRate', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
expect(calculateCacheHitRate(metrics)).toBe(25);
});
@@ -170,6 +176,7 @@ describe('computeSessionStats', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
tools: {
@@ -209,6 +216,7 @@ describe('computeSessionStats', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
},
},
tools: {
diff --git a/packages/cli/src/zed-integration/zedIntegration.test.ts b/packages/cli/src/zed-integration/zedIntegration.test.ts
index ec6f046374..edc32f04b6 100644
--- a/packages/cli/src/zed-integration/zedIntegration.test.ts
+++ b/packages/cli/src/zed-integration/zedIntegration.test.ts
@@ -25,6 +25,7 @@ import {
type GeminiChat,
type Config,
type MessageBus,
+ LlmRole,
} from '@google/gemini-cli-core';
import {
SettingScope,
@@ -588,7 +589,8 @@ describe('Session', () => {
}),
]),
expect.anything(),
- expect.anything(),
+ expect.any(AbortSignal),
+ LlmRole.MAIN,
);
});
diff --git a/packages/cli/src/zed-integration/zedIntegration.ts b/packages/cli/src/zed-integration/zedIntegration.ts
index 1d976e5de6..cae51a6127 100644
--- a/packages/cli/src/zed-integration/zedIntegration.ts
+++ b/packages/cli/src/zed-integration/zedIntegration.ts
@@ -35,6 +35,7 @@ import {
startupProfiler,
Kind,
partListUnionToString,
+ LlmRole,
} from '@google/gemini-cli-core';
import * as acp from '@agentclientprotocol/sdk';
import { AcpFileSystemService } from './fileSystemService.js';
@@ -493,6 +494,7 @@ export class Session {
nextMessage?.parts ?? [],
promptId,
pendingSend.signal,
+ LlmRole.MAIN,
);
nextMessage = null;
diff --git a/packages/core/src/agents/local-executor.test.ts b/packages/core/src/agents/local-executor.test.ts
index 6b33e0b76b..d2634ecc52 100644
--- a/packages/core/src/agents/local-executor.test.ts
+++ b/packages/core/src/agents/local-executor.test.ts
@@ -47,6 +47,7 @@ import {
logAgentFinish,
logRecoveryAttempt,
} from '../telemetry/loggers.js';
+import { LlmRole } from '../telemetry/types.js';
import {
AgentStartEvent,
AgentFinishEvent,
@@ -1407,6 +1408,7 @@ describe('LocalAgentExecutor', () => {
expect.any(Array),
expect.any(String),
expect.any(AbortSignal),
+ LlmRole.SUBAGENT,
);
});
@@ -1452,6 +1454,7 @@ describe('LocalAgentExecutor', () => {
expect.any(Array),
expect.any(String),
expect.any(AbortSignal),
+ LlmRole.SUBAGENT,
);
});
});
diff --git a/packages/core/src/agents/local-executor.ts b/packages/core/src/agents/local-executor.ts
index e9fee219e3..b30f1ae53e 100644
--- a/packages/core/src/agents/local-executor.ts
+++ b/packages/core/src/agents/local-executor.ts
@@ -59,6 +59,7 @@ import { getVersion } from '../utils/version.js';
import { getToolCallContext } from '../utils/toolCallContext.js';
import { scheduleAgentTools } from './agent-scheduler.js';
import { DeadlineTimer } from '../utils/deadlineTimer.js';
+import { LlmRole } from '../telemetry/types.js';
/** A callback function to report on agent activity. */
export type ActivityCallback = (activity: SubagentActivityEvent) => void;
@@ -699,6 +700,8 @@ export class LocalAgentExecutor {
modelToUse = requestedModel;
}
+ const role = LlmRole.SUBAGENT;
+
const responseStream = await chat.sendMessageStream(
{
model: modelToUse,
@@ -707,6 +710,7 @@ export class LocalAgentExecutor {
message.parts || [],
promptId,
signal,
+ role,
);
const functionCalls: FunctionCall[] = [];
diff --git a/packages/core/src/code_assist/server.test.ts b/packages/core/src/code_assist/server.test.ts
index 35b91fd1c5..89ce45e1aa 100644
--- a/packages/core/src/code_assist/server.test.ts
+++ b/packages/core/src/code_assist/server.test.ts
@@ -9,6 +9,7 @@ import { CodeAssistServer } from './server.js';
import { OAuth2Client } from 'google-auth-library';
import { UserTierId, ActionStatus } from './types.js';
import { FinishReason } from '@google/genai';
+import { LlmRole } from '../telemetry/types.js';
vi.mock('google-auth-library');
@@ -69,6 +70,7 @@ describe('CodeAssistServer', () => {
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
+ LlmRole.MAIN,
);
expect(mockRequest).toHaveBeenCalledWith({
@@ -126,6 +128,7 @@ describe('CodeAssistServer', () => {
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
+ LlmRole.MAIN,
);
expect(recordConversationOfferedSpy).toHaveBeenCalledWith(
@@ -170,6 +173,7 @@ describe('CodeAssistServer', () => {
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
+ LlmRole.MAIN,
);
expect(server.recordCodeAssistMetrics).toHaveBeenCalledWith(
@@ -208,6 +212,7 @@ describe('CodeAssistServer', () => {
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
+ LlmRole.MAIN,
);
const mockResponseData = {
@@ -369,6 +374,7 @@ describe('CodeAssistServer', () => {
contents: [{ role: 'user', parts: [{ text: 'request' }] }],
},
'user-prompt-id',
+ LlmRole.MAIN,
);
// Push SSE data to the stream
diff --git a/packages/core/src/code_assist/server.ts b/packages/core/src/code_assist/server.ts
index 055c041d2b..871af4cbfa 100644
--- a/packages/core/src/code_assist/server.ts
+++ b/packages/core/src/code_assist/server.ts
@@ -53,6 +53,7 @@ import {
recordConversationOffered,
} from './telemetry.js';
import { getClientMetadata } from './experiments/client_metadata.js';
+import type { LlmRole } from '../telemetry/types.js';
/** HTTP options to be used in each of the requests. */
export interface HttpOptions {
/** Additional HTTP headers to be sent with the request. */
@@ -75,6 +76,8 @@ export class CodeAssistServer implements ContentGenerator {
async generateContentStream(
req: GenerateContentParameters,
userPromptId: string,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const responses =
await this.requestStreamingPost(
@@ -125,6 +128,8 @@ export class CodeAssistServer implements ContentGenerator {
async generateContent(
req: GenerateContentParameters,
userPromptId: string,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ role: LlmRole,
): Promise<GenerateContentResponse> {
const start = Date.now();
const response = await this.requestPost(
diff --git a/packages/core/src/core/baseLlmClient.test.ts b/packages/core/src/core/baseLlmClient.test.ts
index c1f796389e..4d09a1edd9 100644
--- a/packages/core/src/core/baseLlmClient.test.ts
+++ b/packages/core/src/core/baseLlmClient.test.ts
@@ -30,6 +30,7 @@ import { MalformedJsonResponseEvent } from '../telemetry/types.js';
import { getErrorMessage } from '../utils/errors.js';
import type { ModelConfigService } from '../services/modelConfigService.js';
import { makeResolvedModelConfig } from '../services/modelConfigServiceTestUtils.js';
+import { LlmRole } from '../telemetry/types.js';
vi.mock('../utils/errorReporting.js');
vi.mock('../telemetry/loggers.js');
@@ -128,6 +129,7 @@ describe('BaseLlmClient', () => {
schema: { type: 'object', properties: { color: { type: 'string' } } },
abortSignal: abortController.signal,
promptId: 'test-prompt-id',
+ role: LlmRole.UTILITY_TOOL,
};
});
@@ -169,6 +171,7 @@ describe('BaseLlmClient', () => {
},
},
'test-prompt-id',
+ LlmRole.UTILITY_TOOL,
);
});
@@ -191,6 +194,7 @@ describe('BaseLlmClient', () => {
}),
}),
expect.any(String),
+ LlmRole.UTILITY_TOOL,
);
});
@@ -209,6 +213,7 @@ describe('BaseLlmClient', () => {
expect(mockGenerateContent).toHaveBeenCalledWith(
expect.any(Object),
customPromptId,
+ LlmRole.UTILITY_TOOL,
);
});
@@ -528,6 +533,7 @@ describe('BaseLlmClient', () => {
contents: [{ role: 'user', parts: [{ text: 'Give me content.' }] }],
abortSignal: abortController.signal,
promptId: 'content-prompt-id',
+ role: LlmRole.UTILITY_TOOL,
};
const result = await client.generateContent(options);
@@ -556,6 +562,7 @@ describe('BaseLlmClient', () => {
},
},
'content-prompt-id',
+ LlmRole.UTILITY_TOOL,
);
});
@@ -568,6 +575,7 @@ describe('BaseLlmClient', () => {
contents: [{ role: 'user', parts: [{ text: 'Give me content.' }] }],
abortSignal: abortController.signal,
promptId: 'content-prompt-id',
+ role: LlmRole.UTILITY_TOOL,
};
await client.generateContent(options);
@@ -590,6 +598,7 @@ describe('BaseLlmClient', () => {
contents: [{ role: 'user', parts: [{ text: 'Give me content.' }] }],
abortSignal: abortController.signal,
promptId: 'content-prompt-id',
+ role: LlmRole.UTILITY_TOOL,
};
await expect(client.generateContent(options)).rejects.toThrow(
@@ -634,6 +643,7 @@ describe('BaseLlmClient', () => {
contents: [{ role: 'user', parts: [{ text: 'Give me a color.' }] }],
abortSignal: abortController.signal,
promptId: 'content-prompt-id',
+ role: LlmRole.UTILITY_TOOL,
};
jsonOptions = {
@@ -655,6 +665,7 @@ describe('BaseLlmClient', () => {
await client.generateContent({
...contentOptions,
modelConfigKey: { model: successfulModel },
+ role: LlmRole.UTILITY_TOOL,
});
expect(mockAvailabilityService.markHealthy).toHaveBeenCalledWith(
@@ -680,6 +691,7 @@ describe('BaseLlmClient', () => {
...contentOptions,
modelConfigKey: { model: firstModel },
maxAttempts: 2,
+ role: LlmRole.UTILITY_TOOL,
});
await vi.runAllTimersAsync();
@@ -689,6 +701,7 @@ describe('BaseLlmClient', () => {
...contentOptions,
modelConfigKey: { model: firstModel },
maxAttempts: 2,
+ role: LlmRole.UTILITY_TOOL,
});
expect(mockConfig.setActiveModel).toHaveBeenCalledWith(firstModel);
@@ -699,6 +712,7 @@ describe('BaseLlmClient', () => {
expect(mockGenerateContent).toHaveBeenLastCalledWith(
expect.objectContaining({ model: fallbackModel }),
expect.any(String),
+ LlmRole.UTILITY_TOOL,
);
});
@@ -724,6 +738,7 @@ describe('BaseLlmClient', () => {
await client.generateContent({
...contentOptions,
modelConfigKey: { model: stickyModel },
+ role: LlmRole.UTILITY_TOOL,
});
expect(mockAvailabilityService.consumeStickyAttempt).toHaveBeenCalledWith(
@@ -763,6 +778,7 @@ describe('BaseLlmClient', () => {
expect(mockGenerateContent).toHaveBeenLastCalledWith(
expect.objectContaining({ model: availableModel }),
jsonOptions.promptId,
+ LlmRole.UTILITY_TOOL,
);
});
@@ -814,6 +830,7 @@ describe('BaseLlmClient', () => {
...contentOptions,
modelConfigKey: { model: firstModel },
maxAttempts: 2,
+ role: LlmRole.UTILITY_TOOL,
});
expect(mockGenerateContent).toHaveBeenCalledTimes(2);
diff --git a/packages/core/src/core/baseLlmClient.ts b/packages/core/src/core/baseLlmClient.ts
index a508cdd038..64730ff74c 100644
--- a/packages/core/src/core/baseLlmClient.ts
+++ b/packages/core/src/core/baseLlmClient.ts
@@ -27,6 +27,7 @@ import {
applyModelSelection,
createAvailabilityContextProvider,
} from '../availability/policyHelpers.js';
+import { LlmRole } from '../telemetry/types.js';
const DEFAULT_MAX_ATTEMPTS = 5;
@@ -51,6 +52,10 @@ export interface GenerateJsonOptions {
* A unique ID for the prompt, used for logging/telemetry correlation.
*/
promptId: string;
+ /**
+ * The role of the LLM call.
+ */
+ role: LlmRole;
/**
* The maximum number of attempts for the request.
*/
@@ -76,6 +81,10 @@ export interface GenerateContentOptions {
* A unique ID for the prompt, used for logging/telemetry correlation.
*/
promptId: string;
+ /**
+ * The role of the LLM call.
+ */
+ role: LlmRole;
/**
* The maximum number of attempts for the request.
*/
@@ -115,6 +124,7 @@ export class BaseLlmClient {
systemInstruction,
abortSignal,
promptId,
+ role,
maxAttempts,
} = options;
@@ -150,6 +160,7 @@ export class BaseLlmClient {
},
shouldRetryOnContent,
'generateJson',
+ role,
);
// If we are here, the content is valid (not empty and parsable).
@@ -215,6 +226,7 @@ export class BaseLlmClient {
systemInstruction,
abortSignal,
promptId,
+ role,
maxAttempts,
} = options;
@@ -234,6 +246,7 @@ export class BaseLlmClient {
},
shouldRetryOnContent,
'generateContent',
+ role,
);
}
@@ -241,6 +254,7 @@ export class BaseLlmClient {
options: _CommonGenerateOptions,
shouldRetryOnContent: (response: GenerateContentResponse) => boolean,
errorContext: 'generateJson' | 'generateContent',
+ role: LlmRole = LlmRole.UTILITY_TOOL,
): Promise<GenerateContentResponse> {
const {
modelConfigKey,
@@ -293,7 +307,11 @@ export class BaseLlmClient {
config: finalConfig,
contents,
};
- return this.contentGenerator.generateContent(requestParams, promptId);
+ return this.contentGenerator.generateContent(
+ requestParams,
+ promptId,
+ role,
+ );
};
return await retryWithBackoff(apiCall, {
diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts
index 185019434b..caa5d17ea0 100644
--- a/packages/core/src/core/client.test.ts
+++ b/packages/core/src/core/client.test.ts
@@ -47,6 +47,7 @@ import type {
} from '../services/modelConfigService.js';
import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js';
import * as policyCatalog from '../availability/policyCatalog.js';
+import { LlmRole } from '../telemetry/types.js';
import { partToString } from '../utils/partUtils.js';
import { coreEvents } from '../utils/events.js';
@@ -2913,6 +2914,7 @@ ${JSON.stringify(
{ model: 'test-model' },
contents,
abortSignal,
+ LlmRole.MAIN,
);
expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
@@ -2927,6 +2929,7 @@ ${JSON.stringify(
contents,
},
'test-session-id',
+ LlmRole.MAIN,
);
});
@@ -2938,6 +2941,7 @@ ${JSON.stringify(
{ model: initialModel },
contents,
new AbortController().signal,
+ LlmRole.MAIN,
);
expect(mockContentGenerator.generateContent).toHaveBeenCalledWith(
@@ -2945,6 +2949,7 @@ ${JSON.stringify(
model: initialModel,
}),
'test-session-id',
+ LlmRole.MAIN,
);
});
diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts
index fb9edaa7a5..951da7d6ef 100644
--- a/packages/core/src/core/client.ts
+++ b/packages/core/src/core/client.ts
@@ -64,6 +64,7 @@ import { resolveModel } from '../config/models.js';
import type { RetryAvailabilityContext } from '../utils/retry.js';
import { partToString } from '../utils/partUtils.js';
import { coreEvents, CoreEvent } from '../utils/events.js';
+import type { LlmRole } from '../telemetry/types.js';
const MAX_TURNS = 100;
@@ -925,6 +926,7 @@ export class GeminiClient {
modelConfigKey: ModelConfigKey,
contents: Content[],
abortSignal: AbortSignal,
+ role: LlmRole,
): Promise<GenerateContentResponse> {
const desiredModelConfig =
this.config.modelConfigService.getResolvedConfig(modelConfigKey);
@@ -979,6 +981,7 @@ export class GeminiClient {
contents,
},
this.lastPromptId,
+ role,
);
};
const onPersistent429Callback = async (
diff --git a/packages/core/src/core/contentGenerator.ts b/packages/core/src/core/contentGenerator.ts
index 0c9b36634e..bfd8221f75 100644
--- a/packages/core/src/core/contentGenerator.ts
+++ b/packages/core/src/core/contentGenerator.ts
@@ -24,6 +24,7 @@ import { FakeContentGenerator } from './fakeContentGenerator.js';
import { parseCustomHeaders } from '../utils/customHeaderUtils.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';
import { getVersion, resolveModel } from '../../index.js';
+import type { LlmRole } from '../telemetry/llmRole.js';
/**
* Interface abstracting the core functionalities for generating content and counting tokens.
@@ -32,11 +33,13 @@ export interface ContentGenerator {
generateContent(
request: GenerateContentParameters,
userPromptId: string,
+ role: LlmRole,
): Promise<GenerateContentResponse>;
generateContentStream(
request: GenerateContentParameters,
userPromptId: string,
+ role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>>;
countTokens(request: CountTokensParameters): Promise<CountTokensResponse>;
diff --git a/packages/core/src/core/fakeContentGenerator.test.ts b/packages/core/src/core/fakeContentGenerator.test.ts
index de8306e516..673fa6b2e7 100644
--- a/packages/core/src/core/fakeContentGenerator.test.ts
+++ b/packages/core/src/core/fakeContentGenerator.test.ts
@@ -18,6 +18,7 @@ import {
type CountTokensParameters,
type EmbedContentParameters,
} from '@google/genai';
+import { LlmRole } from '../telemetry/types.js';
vi.mock('node:fs', async (importOriginal) => {
const actual = await importOriginal();
@@ -79,6 +80,7 @@ describe('FakeContentGenerator', () => {
const response = await generator.generateContent(
{} as GenerateContentParameters,
'id',
+ LlmRole.MAIN,
);
expect(response).instanceOf(GenerateContentResponse);
expect(response).toEqual(fakeGenerateContentResponse.response);
@@ -91,6 +93,7 @@ describe('FakeContentGenerator', () => {
const stream = await generator.generateContentStream(
{} as GenerateContentParameters,
'id',
+ LlmRole.MAIN,
);
const responses = [];
for await (const response of stream) {
@@ -121,7 +124,11 @@ describe('FakeContentGenerator', () => {
];
const generator = new FakeContentGenerator(fakeResponses);
for (const fakeResponse of fakeResponses) {
- const response = await generator[fakeResponse.method]({} as never, '');
+ const response = await generator[fakeResponse.method](
+ {} as never,
+ '',
+ LlmRole.MAIN,
+ );
if (fakeResponse.method === 'generateContentStream') {
const responses = [];
for await (const item of response as AsyncGenerator<GenerateContentResponse>) {
@@ -137,7 +144,11 @@ describe('FakeContentGenerator', () => {
it('should throw error when no more responses', async () => {
const generator = new FakeContentGenerator([fakeGenerateContentResponse]);
- await generator.generateContent({} as GenerateContentParameters, 'id');
+ await generator.generateContent(
+ {} as GenerateContentParameters,
+ 'id',
+ LlmRole.MAIN,
+ );
await expect(
generator.embedContent({} as EmbedContentParameters),
).rejects.toThrowError('No more mock responses for embedContent');
@@ -145,10 +156,18 @@ describe('FakeContentGenerator', () => {
generator.countTokens({} as CountTokensParameters),
).rejects.toThrowError('No more mock responses for countTokens');
await expect(
- generator.generateContentStream({} as GenerateContentParameters, 'id'),
+ generator.generateContentStream(
+ {} as GenerateContentParameters,
+ 'id',
+ LlmRole.MAIN,
+ ),
).rejects.toThrow('No more mock responses for generateContentStream');
await expect(
- generator.generateContent({} as GenerateContentParameters, 'id'),
+ generator.generateContent(
+ {} as GenerateContentParameters,
+ 'id',
+ LlmRole.MAIN,
+ ),
).rejects.toThrowError('No more mock responses for generateContent');
});
@@ -161,6 +180,7 @@ describe('FakeContentGenerator', () => {
const response = await generator.generateContent(
{} as GenerateContentParameters,
'id',
+ LlmRole.MAIN,
);
expect(response).toEqual(fakeGenerateContentResponse.response);
});
diff --git a/packages/core/src/core/fakeContentGenerator.ts b/packages/core/src/core/fakeContentGenerator.ts
index a6185b3eae..5bedc2d187 100644
--- a/packages/core/src/core/fakeContentGenerator.ts
+++ b/packages/core/src/core/fakeContentGenerator.ts
@@ -16,6 +16,7 @@ import { promises } from 'node:fs';
import type { ContentGenerator } from './contentGenerator.js';
import type { UserTierId } from '../code_assist/types.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
+import type { LlmRole } from '../telemetry/types.js';
export type FakeResponse =
| {
@@ -79,6 +80,8 @@ export class FakeContentGenerator implements ContentGenerator {
async generateContent(
request: GenerateContentParameters,
_userPromptId: string,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ role: LlmRole,
): Promise<GenerateContentResponse> {
return Object.setPrototypeOf(
this.getNextResponse('generateContent', request),
@@ -89,6 +92,8 @@ export class FakeContentGenerator implements ContentGenerator {
async generateContentStream(
request: GenerateContentParameters,
_userPromptId: string,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const responses = this.getNextResponse('generateContentStream', request);
async function* stream() {
diff --git a/packages/core/src/core/geminiChat.test.ts b/packages/core/src/core/geminiChat.test.ts
index c75cc4967d..8a6b3f8bc8 100644
--- a/packages/core/src/core/geminiChat.test.ts
+++ b/packages/core/src/core/geminiChat.test.ts
@@ -28,6 +28,7 @@ import type { ModelAvailabilityService } from '../availability/modelAvailability
import * as policyHelpers from '../availability/policyHelpers.js';
import { makeResolvedModelConfig } from '../services/modelConfigServiceTestUtils.js';
import type { HookSystem } from '../hooks/hookSystem.js';
+import { LlmRole } from '../telemetry/types.js';
// Mock fs module to prevent actual file system operations during tests
const mockFileSystem = new Map();
@@ -287,6 +288,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-tool-call-empty-end',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
(async () => {
@@ -340,6 +342,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-no-finish-empty-end',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
(async () => {
@@ -387,6 +390,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-valid-then-invalid-end',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
(async () => {
@@ -435,6 +439,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-empty-chunk-consolidation',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// Consume the stream
@@ -494,6 +499,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-multi-chunk',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// Consume the stream to trigger history recording.
@@ -543,6 +549,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-mixed-chunk',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// This loop consumes the stream.
@@ -612,6 +619,7 @@ describe('GeminiChat', () => {
},
'prompt-id-stream-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
// 4. Assert: The stream processing should throw an InvalidStreamError.
@@ -656,6 +664,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
// Should not throw an error
@@ -693,6 +702,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
@@ -729,6 +739,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
@@ -765,6 +776,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
// Should not throw an error
@@ -802,6 +814,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id-malformed',
new AbortController().signal,
+ LlmRole.MAIN,
);
// Should throw an error
@@ -849,6 +862,7 @@ describe('GeminiChat', () => {
'test retry',
'prompt-id-retry-malformed',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
for await (const event of stream) {
@@ -906,6 +920,7 @@ describe('GeminiChat', () => {
'hello',
'prompt-id-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume stream
@@ -931,6 +946,7 @@ describe('GeminiChat', () => {
},
},
'prompt-id-1',
+ LlmRole.MAIN,
);
});
@@ -954,6 +970,7 @@ describe('GeminiChat', () => {
'hello',
'prompt-id-thinking-level',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume stream
@@ -970,6 +987,7 @@ describe('GeminiChat', () => {
}),
}),
'prompt-id-thinking-level',
+ LlmRole.MAIN,
);
});
@@ -993,6 +1011,7 @@ describe('GeminiChat', () => {
'hello',
'prompt-id-thinking-budget',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume stream
@@ -1003,12 +1022,13 @@ describe('GeminiChat', () => {
model: 'gemini-2.0-flash',
config: expect.objectContaining({
thinkingConfig: {
- thinkingBudget: DEFAULT_THINKING_MODE,
+ thinkingBudget: 8192,
thinkingLevel: undefined,
},
}),
}),
'prompt-id-thinking-budget',
+ LlmRole.MAIN,
);
});
});
@@ -1060,6 +1080,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id-no-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
@@ -1108,6 +1129,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-yield-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
for await (const event of stream) {
@@ -1150,6 +1172,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id-retry-success',
new AbortController().signal,
+ LlmRole.MAIN,
);
const chunks: StreamEvent[] = [];
for await (const chunk of stream) {
@@ -1222,6 +1245,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-retry-temperature',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
@@ -1243,6 +1267,7 @@ describe('GeminiChat', () => {
}),
}),
'prompt-id-retry-temperature',
+ LlmRole.MAIN,
);
// Second call (retry) should have temperature 1
@@ -1256,6 +1281,7 @@ describe('GeminiChat', () => {
}),
}),
'prompt-id-retry-temperature',
+ LlmRole.MAIN,
);
});
@@ -1281,6 +1307,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id-retry-fail',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(async () => {
for await (const _ of stream) {
@@ -1347,6 +1374,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-400',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(
@@ -1386,9 +1414,11 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-429-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
+
for await (const event of stream) {
events.push(event);
}
@@ -1435,9 +1465,11 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-500-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
+
for await (const event of stream) {
events.push(event);
}
@@ -1492,9 +1524,11 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-fetch-error-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
+
for await (const event of stream) {
events.push(event);
}
@@ -1556,6 +1590,7 @@ describe('GeminiChat', () => {
'Second question',
'prompt-id-retry-existing',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume stream
@@ -1628,6 +1663,7 @@ describe('GeminiChat', () => {
'test empty stream',
'prompt-id-empty-stream',
new AbortController().signal,
+ LlmRole.MAIN,
);
const chunks: StreamEvent[] = [];
for await (const chunk of stream) {
@@ -1709,6 +1745,7 @@ describe('GeminiChat', () => {
'first',
'prompt-1',
new AbortController().signal,
+ LlmRole.MAIN,
);
const firstStreamIterator = firstStream[Symbol.asyncIterator]();
await firstStreamIterator.next();
@@ -1719,6 +1756,7 @@ describe('GeminiChat', () => {
'second',
'prompt-2',
new AbortController().signal,
+ LlmRole.MAIN,
);
// 5. Assert that only one API call has been made so far.
@@ -1824,6 +1862,7 @@ describe('GeminiChat', () => {
'trigger 429',
'prompt-id-fb1',
new AbortController().signal,
+ LlmRole.MAIN,
);
// Consume stream to trigger logic
@@ -1890,6 +1929,7 @@ describe('GeminiChat', () => {
'test message',
'prompt-id-discard-test',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
for await (const event of stream) {
@@ -2106,6 +2146,7 @@ describe('GeminiChat', () => {
'test',
'prompt-healthy',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume
@@ -2141,6 +2182,7 @@ describe('GeminiChat', () => {
'test',
'prompt-sticky-once',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume
@@ -2191,6 +2233,7 @@ describe('GeminiChat', () => {
'test',
'prompt-fallback-arg',
new AbortController().signal,
+ LlmRole.MAIN,
);
for await (const _ of stream) {
// consume
@@ -2269,6 +2312,7 @@ describe('GeminiChat', () => {
'test',
'prompt-config-refresh',
new AbortController().signal,
+ LlmRole.MAIN,
);
// Consume to drive both attempts
for await (const _ of stream) {
@@ -2281,9 +2325,12 @@ describe('GeminiChat', () => {
1,
expect.objectContaining({
model: 'model-a',
- config: expect.objectContaining({ temperature: 0.1 }),
+ config: expect.objectContaining({
+ temperature: 0.1,
+ }),
}),
expect.any(String),
+ LlmRole.MAIN,
);
expect(
mockContentGenerator.generateContentStream,
@@ -2291,9 +2338,12 @@ describe('GeminiChat', () => {
2,
expect.objectContaining({
model: 'model-b',
- config: expect.objectContaining({ temperature: 0.9 }),
+ config: expect.objectContaining({
+ temperature: 0.9,
+ }),
}),
expect.any(String),
+ LlmRole.MAIN,
);
});
});
@@ -2323,6 +2373,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -2353,6 +2404,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -2392,6 +2444,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -2428,6 +2481,7 @@ describe('GeminiChat', () => {
'test',
'prompt-id',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts
index 7057d8d210..6b1ede738c 100644
--- a/packages/core/src/core/geminiChat.ts
+++ b/packages/core/src/core/geminiChat.ts
@@ -55,6 +55,7 @@ import {
createAvailabilityContextProvider,
} from '../availability/policyHelpers.js';
import { coreEvents } from '../utils/events.js';
+import type { LlmRole } from '../telemetry/types.js';
export enum StreamEventType {
/** A regular content chunk from the API. */
@@ -292,6 +293,7 @@ export class GeminiChat {
message: PartListUnion,
prompt_id: string,
signal: AbortSignal,
+ role: LlmRole,
displayContent?: PartListUnion,
): Promise<AsyncGenerator<StreamEvent>> {
await this.sendPromise;
@@ -362,6 +364,7 @@ export class GeminiChat {
requestContents,
prompt_id,
signal,
+ role,
);
isConnectionPhase = false;
for await (const chunk of stream) {
@@ -467,6 +470,7 @@ export class GeminiChat {
requestContents: Content[],
prompt_id: string,
abortSignal: AbortSignal,
+ role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const contentsForPreviewModel =
this.ensureActiveLoopHasThoughtSignatures(requestContents);
@@ -599,6 +603,7 @@ export class GeminiChat {
config,
},
prompt_id,
+ role,
);
};
diff --git a/packages/core/src/core/geminiChat_network_retry.test.ts b/packages/core/src/core/geminiChat_network_retry.test.ts
index 07561fed36..519ef3ee14 100644
--- a/packages/core/src/core/geminiChat_network_retry.test.ts
+++ b/packages/core/src/core/geminiChat_network_retry.test.ts
@@ -14,6 +14,7 @@ import { setSimulate429 } from '../utils/testUtils.js';
import { HookSystem } from '../hooks/hookSystem.js';
import { createMockMessageBus } from '../test-utils/mock-message-bus.js';
import { createAvailabilityServiceMock } from '../availability/testUtils.js';
+import { LlmRole } from '../telemetry/types.js';
// Mock fs module
vi.mock('node:fs', async (importOriginal) => {
@@ -154,6 +155,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-retry-network',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -223,6 +225,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-retry-fetch',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -263,6 +266,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-no-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(async () => {
@@ -304,6 +308,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-ssl-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -353,6 +358,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-connection-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
@@ -384,6 +390,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-no-connection-retry',
new AbortController().signal,
+ LlmRole.MAIN,
);
await expect(async () => {
@@ -438,6 +445,7 @@ describe('GeminiChat Network Retries', () => {
'test message',
'prompt-id-ssl-mid-stream',
new AbortController().signal,
+ LlmRole.MAIN,
);
const events: StreamEvent[] = [];
diff --git a/packages/core/src/core/loggingContentGenerator.test.ts b/packages/core/src/core/loggingContentGenerator.test.ts
index fafeb5d1d2..dd354fa16f 100644
--- a/packages/core/src/core/loggingContentGenerator.test.ts
+++ b/packages/core/src/core/loggingContentGenerator.test.ts
@@ -30,8 +30,8 @@ import type {
import type { ContentGenerator } from './contentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import type { Config } from '../config/config.js';
-import { ApiRequestEvent } from '../telemetry/types.js';
import { UserTierId } from '../code_assist/types.js';
+import { ApiRequestEvent, LlmRole } from '../telemetry/types.js';
describe('LoggingContentGenerator', () => {
let wrapped: ContentGenerator;
@@ -89,13 +89,18 @@ describe('LoggingContentGenerator', () => {
const promise = loggingContentGenerator.generateContent(
req,
userPromptId,
+ LlmRole.MAIN,
);
vi.advanceTimersByTime(1000);
await promise;
- expect(wrapped.generateContent).toHaveBeenCalledWith(req, userPromptId);
+ expect(wrapped.generateContent).toHaveBeenCalledWith(
+ req,
+ userPromptId,
+ LlmRole.MAIN,
+ );
expect(logApiRequest).toHaveBeenCalledWith(
config,
expect.any(ApiRequestEvent),
@@ -118,6 +123,7 @@ describe('LoggingContentGenerator', () => {
const promise = loggingContentGenerator.generateContent(
req,
userPromptId,
+ LlmRole.MAIN,
);
vi.advanceTimersByTime(1000);
@@ -156,12 +162,17 @@ describe('LoggingContentGenerator', () => {
vi.mocked(wrapped.generateContentStream).mockResolvedValue(
createAsyncGenerator(),
);
+
const startTime = new Date('2025-01-01T00:00:00.000Z');
+
vi.setSystemTime(startTime);
const stream = await loggingContentGenerator.generateContentStream(
req,
+
userPromptId,
+
+ LlmRole.MAIN,
);
vi.advanceTimersByTime(1000);
@@ -173,6 +184,7 @@ describe('LoggingContentGenerator', () => {
expect(wrapped.generateContentStream).toHaveBeenCalledWith(
req,
userPromptId,
+ LlmRole.MAIN,
);
expect(logApiRequest).toHaveBeenCalledWith(
config,
@@ -203,6 +215,7 @@ describe('LoggingContentGenerator', () => {
const stream = await loggingContentGenerator.generateContentStream(
req,
userPromptId,
+ LlmRole.MAIN,
);
vi.advanceTimersByTime(1000);
@@ -240,6 +253,7 @@ describe('LoggingContentGenerator', () => {
await loggingContentGenerator.generateContentStream(
req,
mainAgentPromptId,
+ LlmRole.MAIN,
);
expect(config.setLatestApiRequest).toHaveBeenCalledWith(req);
@@ -264,6 +278,7 @@ describe('LoggingContentGenerator', () => {
await loggingContentGenerator.generateContentStream(
req,
subAgentPromptId,
+ LlmRole.SUBAGENT,
);
expect(config.setLatestApiRequest).not.toHaveBeenCalled();
diff --git a/packages/core/src/core/loggingContentGenerator.ts b/packages/core/src/core/loggingContentGenerator.ts
index f8d22934ed..12a1722475 100644
--- a/packages/core/src/core/loggingContentGenerator.ts
+++ b/packages/core/src/core/loggingContentGenerator.ts
@@ -22,6 +22,7 @@ import {
ApiResponseEvent,
ApiErrorEvent,
} from '../telemetry/types.js';
+import type { LlmRole } from '../telemetry/llmRole.js';
import type { Config } from '../config/config.js';
import type { UserTierId } from '../code_assist/types.js';
import {
@@ -65,6 +66,7 @@ export class LoggingContentGenerator implements ContentGenerator {
contents: Content[],
model: string,
promptId: string,
+ role: LlmRole,
generationConfig?: GenerateContentConfig,
serverDetails?: ServerDetails,
): void {
@@ -80,6 +82,7 @@ export class LoggingContentGenerator implements ContentGenerator {
server: serverDetails,
},
requestText,
+ role,
),
);
}
@@ -122,6 +125,7 @@ export class LoggingContentGenerator implements ContentGenerator {
durationMs: number,
model: string,
prompt_id: string,
+ role: LlmRole,
responseId: string | undefined,
responseCandidates?: Candidate[],
usageMetadata?: GenerateContentResponseUsageMetadata,
@@ -147,6 +151,7 @@ export class LoggingContentGenerator implements ContentGenerator {
this.config.getContentGeneratorConfig()?.authType,
usageMetadata,
responseText,
+ role,
),
);
}
@@ -157,6 +162,7 @@ export class LoggingContentGenerator implements ContentGenerator {
model: string,
prompt_id: string,
requestContents: Content[],
+ role: LlmRole,
generationConfig?: GenerateContentConfig,
serverDetails?: ServerDetails,
): void {
@@ -181,6 +187,7 @@ export class LoggingContentGenerator implements ContentGenerator {
? // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion
(error as StructuredError).status
: undefined,
+ role,
),
);
}
@@ -188,6 +195,7 @@ export class LoggingContentGenerator implements ContentGenerator {
async generateContent(
req: GenerateContentParameters,
userPromptId: string,
+ role: LlmRole,
): Promise<GenerateContentResponse> {
return runInDevTraceSpan(
{
@@ -203,6 +211,7 @@ export class LoggingContentGenerator implements ContentGenerator {
contents,
req.model,
userPromptId,
+ role,
req.config,
serverDetails,
);
@@ -211,6 +220,7 @@ export class LoggingContentGenerator implements ContentGenerator {
const response = await this.wrapped.generateContent(
req,
userPromptId,
+ role,
);
spanMetadata.output = {
response,
@@ -222,6 +232,7 @@ export class LoggingContentGenerator implements ContentGenerator {
durationMs,
response.modelVersion || req.model,
userPromptId,
+ role,
response.responseId,
response.candidates,
response.usageMetadata,
@@ -247,6 +258,7 @@ export class LoggingContentGenerator implements ContentGenerator {
req.model,
userPromptId,
contents,
+ role,
req.config,
serverDetails,
);
@@ -259,6 +271,7 @@ export class LoggingContentGenerator implements ContentGenerator {
async generateContentStream(
req: GenerateContentParameters,
userPromptId: string,
+ role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
return runInDevTraceSpan(
{
@@ -283,13 +296,18 @@ export class LoggingContentGenerator implements ContentGenerator {
toContents(req.contents),
req.model,
userPromptId,
+ role,
req.config,
serverDetails,
);
let stream: AsyncGenerator<GenerateContentResponse>;
try {
- stream = await this.wrapped.generateContentStream(req, userPromptId);
+ stream = await this.wrapped.generateContentStream(
+ req,
+ userPromptId,
+ role,
+ );
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(
@@ -298,6 +316,7 @@ export class LoggingContentGenerator implements ContentGenerator {
req.model,
userPromptId,
toContents(req.contents),
+ role,
req.config,
serverDetails,
);
@@ -309,6 +328,7 @@ export class LoggingContentGenerator implements ContentGenerator {
stream,
startTime,
userPromptId,
+ role,
spanMetadata,
endSpan,
);
@@ -321,6 +341,7 @@ export class LoggingContentGenerator implements ContentGenerator {
stream: AsyncGenerator,
startTime: number,
userPromptId: string,
+ role: LlmRole,
spanMetadata: SpanMetadata,
endSpan: () => void,
): AsyncGenerator<GenerateContentResponse> {
@@ -344,6 +365,7 @@ export class LoggingContentGenerator implements ContentGenerator {
durationMs,
responses[0]?.modelVersion || req.model,
userPromptId,
+ role,
responses[0]?.responseId,
responses.flatMap((response) => response.candidates || []),
lastUsageMetadata,
@@ -378,6 +400,7 @@ export class LoggingContentGenerator implements ContentGenerator {
responses[0]?.modelVersion || req.model,
userPromptId,
requestContents,
+ role,
req.config,
serverDetails,
);
diff --git a/packages/core/src/core/recordingContentGenerator.test.ts b/packages/core/src/core/recordingContentGenerator.test.ts
index c69c62ebfa..cbdb239ecf 100644
--- a/packages/core/src/core/recordingContentGenerator.test.ts
+++ b/packages/core/src/core/recordingContentGenerator.test.ts
@@ -18,6 +18,7 @@ import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
import type { ContentGenerator } from './contentGenerator.js';
import { RecordingContentGenerator } from './recordingContentGenerator.js';
+import { LlmRole } from '../telemetry/types.js';
vi.mock('node:fs', () => ({
appendFileSync: vi.fn(),
@@ -51,9 +52,14 @@ describe('RecordingContentGenerator', () => {
const response = await recorder.generateContent(
{} as GenerateContentParameters,
'id1',
+ LlmRole.MAIN,
);
expect(response).toEqual(mockResponse);
- expect(mockRealGenerator.generateContent).toHaveBeenCalledWith({}, 'id1');
+ expect(mockRealGenerator.generateContent).toHaveBeenCalledWith(
+ {},
+ 'id1',
+ LlmRole.MAIN,
+ );
expect(appendFileSync).toHaveBeenCalledWith(
filePath,
@@ -90,6 +96,7 @@ describe('RecordingContentGenerator', () => {
const stream = await recorder.generateContentStream(
{} as GenerateContentParameters,
'id1',
+ LlmRole.MAIN,
);
const responses = [];
for await (const response of stream) {
@@ -100,6 +107,7 @@ describe('RecordingContentGenerator', () => {
expect(mockRealGenerator.generateContentStream).toHaveBeenCalledWith(
{},
'id1',
+ LlmRole.MAIN,
);
expect(appendFileSync).toHaveBeenCalledWith(
diff --git a/packages/core/src/core/recordingContentGenerator.ts b/packages/core/src/core/recordingContentGenerator.ts
index 71d783a9d2..f2193bb16d 100644
--- a/packages/core/src/core/recordingContentGenerator.ts
+++ b/packages/core/src/core/recordingContentGenerator.ts
@@ -17,6 +17,7 @@ import type { ContentGenerator } from './contentGenerator.js';
import type { FakeResponse } from './fakeContentGenerator.js';
import type { UserTierId } from '../code_assist/types.js';
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
+import type { LlmRole } from '../telemetry/types.js';
// A ContentGenerator that wraps another content generator and records all the
// responses, with the ability to write them out to a file. These files are
@@ -41,10 +42,12 @@ export class RecordingContentGenerator implements ContentGenerator {
async generateContent(
request: GenerateContentParameters,
userPromptId: string,
+ role: LlmRole,
): Promise<GenerateContentResponse> {
const response = await this.realGenerator.generateContent(
request,
userPromptId,
+ role,
);
const recordedResponse: FakeResponse = {
method: 'generateContent',
@@ -61,6 +64,7 @@ export class RecordingContentGenerator implements ContentGenerator {
async generateContentStream(
request: GenerateContentParameters,
userPromptId: string,
+ role: LlmRole,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const recordedResponse: FakeResponse = {
method: 'generateContentStream',
@@ -70,6 +74,7 @@ export class RecordingContentGenerator implements ContentGenerator {
const realResponses = await this.realGenerator.generateContentStream(
request,
userPromptId,
+ role,
);
async function* stream(filePath: string) {
diff --git a/packages/core/src/core/turn.test.ts b/packages/core/src/core/turn.test.ts
index 0fc96b444f..94a713c3b7 100644
--- a/packages/core/src/core/turn.test.ts
+++ b/packages/core/src/core/turn.test.ts
@@ -14,6 +14,7 @@ import type { GenerateContentResponse, Part, Content } from '@google/genai';
import { reportError } from '../utils/errorReporting.js';
import type { GeminiChat } from './geminiChat.js';
import { InvalidStreamError, StreamEventType } from './geminiChat.js';
+import { LlmRole } from '../telemetry/types.js';
const mockSendMessageStream = vi.fn();
const mockGetHistory = vi.fn();
@@ -102,6 +103,7 @@ describe('Turn', () => {
reqParts,
'prompt-id-1',
expect.any(AbortSignal),
+ LlmRole.MAIN,
undefined,
);
diff --git a/packages/core/src/core/turn.ts b/packages/core/src/core/turn.ts
index a0f5fbd7bf..f31050dd83 100644
--- a/packages/core/src/core/turn.ts
+++ b/packages/core/src/core/turn.ts
@@ -29,6 +29,7 @@ import { parseThought, type ThoughtSummary } from '../utils/thoughtUtils.js';
import { createUserContent } from '@google/genai';
import type { ModelConfigKey } from '../services/modelConfigService.js';
import { getCitations } from '../utils/generateContentResponseUtilities.js';
+import { LlmRole } from '../telemetry/types.js';
import {
type ToolCallRequestInfo,
@@ -251,6 +252,7 @@ export class Turn {
req: PartListUnion,
signal: AbortSignal,
displayContent?: PartListUnion,
+ role: LlmRole = LlmRole.MAIN,
): AsyncGenerator {
try {
// Note: This assumes `sendMessageStream` yields events like
@@ -260,6 +262,7 @@ export class Turn {
req,
this.prompt_id,
signal,
+ role,
displayContent,
);
diff --git a/packages/core/src/output/json-formatter.test.ts b/packages/core/src/output/json-formatter.test.ts
index 14d2cb47c4..13321fae77 100644
--- a/packages/core/src/output/json-formatter.test.ts
+++ b/packages/core/src/output/json-formatter.test.ts
@@ -79,6 +79,7 @@ describe('JsonFormatter', () => {
thoughts: 103,
tool: 0,
},
+ roles: {},
},
'gemini-2.5-flash': {
api: {
@@ -95,6 +96,7 @@ describe('JsonFormatter', () => {
thoughts: 138,
tool: 0,
},
+ roles: {},
},
},
tools: {
diff --git a/packages/core/src/output/stream-json-formatter.test.ts b/packages/core/src/output/stream-json-formatter.test.ts
index 557b72a0a9..69dbaac23b 100644
--- a/packages/core/src/output/stream-json-formatter.test.ts
+++ b/packages/core/src/output/stream-json-formatter.test.ts
@@ -289,6 +289,7 @@ describe('StreamJsonFormatter', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
metrics.tools.totalCalls = 2;
metrics.tools.totalDecisions[ToolCallDecision.AUTO_ACCEPT] = 2;
@@ -319,6 +320,7 @@ describe('StreamJsonFormatter', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
metrics.models['gemini-ultra'] = {
api: { totalRequests: 1, totalErrors: 0, totalLatencyMs: 2000 },
@@ -331,6 +333,7 @@ describe('StreamJsonFormatter', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
metrics.tools.totalCalls = 5;
@@ -360,6 +363,7 @@ describe('StreamJsonFormatter', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
};
const result = formatter.convertToStreamStats(metrics, 1200);
diff --git a/packages/core/src/routing/strategies/classifierStrategy.ts b/packages/core/src/routing/strategies/classifierStrategy.ts
index b21bb5e471..980e89829d 100644
--- a/packages/core/src/routing/strategies/classifierStrategy.ts
+++ b/packages/core/src/routing/strategies/classifierStrategy.ts
@@ -20,6 +20,7 @@ import {
isFunctionResponse,
} from '../../utils/messageInspectors.js';
import { debugLogger } from '../../utils/debugLogger.js';
+import { LlmRole } from '../../telemetry/types.js';
// The number of recent history turns to provide to the router for context.
const HISTORY_TURNS_FOR_CONTEXT = 4;
@@ -161,6 +162,7 @@ export class ClassifierStrategy implements RoutingStrategy {
systemInstruction: CLASSIFIER_SYSTEM_PROMPT,
abortSignal: context.signal,
promptId,
+ role: LlmRole.UTILITY_ROUTER,
});
const routerResponse = ClassifierResponseSchema.parse(jsonResponse);
diff --git a/packages/core/src/routing/strategies/numericalClassifierStrategy.ts b/packages/core/src/routing/strategies/numericalClassifierStrategy.ts
index 5c31fa3057..d4ddf99b8d 100644
--- a/packages/core/src/routing/strategies/numericalClassifierStrategy.ts
+++ b/packages/core/src/routing/strategies/numericalClassifierStrategy.ts
@@ -16,6 +16,7 @@ import { resolveClassifierModel, isGemini3Model } from '../../config/models.js';
import { createUserContent, Type } from '@google/genai';
import type { Config } from '../../config/config.js';
import { debugLogger } from '../../utils/debugLogger.js';
+import { LlmRole } from '../../telemetry/types.js';
// The number of recent history turns to provide to the router for context.
const HISTORY_TURNS_FOR_CONTEXT = 8;
@@ -169,6 +170,7 @@ export class NumericalClassifierStrategy implements RoutingStrategy {
systemInstruction: CLASSIFIER_SYSTEM_PROMPT,
abortSignal: context.signal,
promptId,
+ role: LlmRole.UTILITY_ROUTER,
});
const routerResponse = ClassifierResponseSchema.parse(jsonResponse);
diff --git a/packages/core/src/services/chatCompressionService.ts b/packages/core/src/services/chatCompressionService.ts
index 90101052d9..6f5366aad5 100644
--- a/packages/core/src/services/chatCompressionService.ts
+++ b/packages/core/src/services/chatCompressionService.ts
@@ -31,6 +31,7 @@ import {
PREVIEW_GEMINI_FLASH_MODEL,
} from '../config/models.js';
import { PreCompressTrigger } from '../hooks/types.js';
+import { LlmRole } from '../telemetry/types.js';
/**
* Default threshold for compression token count as a fraction of the model's
@@ -339,6 +340,7 @@ export class ChatCompressionService {
promptId,
// TODO(joshualitt): wire up a sensible abort signal,
abortSignal: abortSignal ?? new AbortController().signal,
+ role: LlmRole.UTILITY_COMPRESSOR,
});
const summary = getResponseText(summaryResponse) ?? '';
@@ -365,6 +367,7 @@ export class ChatCompressionService {
],
systemInstruction: { text: getCompressionPrompt(config) },
promptId: `${promptId}-verify`,
+ role: LlmRole.UTILITY_COMPRESSOR,
abortSignal: abortSignal ?? new AbortController().signal,
});
diff --git a/packages/core/src/services/loopDetectionService.ts b/packages/core/src/services/loopDetectionService.ts
index 2e4a73cf03..8ae2b77898 100644
--- a/packages/core/src/services/loopDetectionService.ts
+++ b/packages/core/src/services/loopDetectionService.ts
@@ -25,6 +25,7 @@ import {
isFunctionResponse,
} from '../utils/messageInspectors.js';
import { debugLogger } from '../utils/debugLogger.js';
+import { LlmRole } from '../telemetry/types.js';
const TOOL_CALL_LOOP_THRESHOLD = 5;
const CONTENT_LOOP_THRESHOLD = 10;
@@ -554,6 +555,7 @@ export class LoopDetectionService {
abortSignal: signal,
promptId: this.promptId,
maxAttempts: 2,
+ role: LlmRole.UTILITY_LOOP_DETECTOR,
});
if (
diff --git a/packages/core/src/services/sessionSummaryService.ts b/packages/core/src/services/sessionSummaryService.ts
index 98ffd66fca..09c60a2e31 100644
--- a/packages/core/src/services/sessionSummaryService.ts
+++ b/packages/core/src/services/sessionSummaryService.ts
@@ -10,6 +10,7 @@ import { partListUnionToString } from '../core/geminiRequest.js';
import { debugLogger } from '../utils/debugLogger.js';
import type { Content } from '@google/genai';
import { getResponseText } from '../utils/partUtils.js';
+import { LlmRole } from '../telemetry/types.js';
const DEFAULT_MAX_MESSAGES = 20;
const DEFAULT_TIMEOUT_MS = 5000;
@@ -124,6 +125,7 @@ export class SessionSummaryService {
contents,
abortSignal: abortController.signal,
promptId: 'session-summary-generation',
+ role: LlmRole.UTILITY_SUMMARIZER,
});
const summary = getResponseText(response);
diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts
index ee2cf3d41e..2b09fde334 100644
--- a/packages/core/src/telemetry/index.ts
+++ b/packages/core/src/telemetry/index.ts
@@ -65,6 +65,7 @@ export {
ToolCallDecision,
RewindEvent,
} from './types.js';
+export { LlmRole } from './llmRole.js';
export { makeSlashCommandEvent, makeChatCompressionEvent } from './types.js';
export type { TelemetryEvent } from './types.js';
export { SpanStatusCode, ValueType } from '@opentelemetry/api';
diff --git a/packages/core/src/telemetry/llmRole.ts b/packages/core/src/telemetry/llmRole.ts
new file mode 100644
index 0000000000..47e68a8442
--- /dev/null
+++ b/packages/core/src/telemetry/llmRole.ts
@@ -0,0 +1,18 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+export enum LlmRole {
+ MAIN = 'main',
+ SUBAGENT = 'subagent',
+ UTILITY_TOOL = 'utility_tool',
+ UTILITY_COMPRESSOR = 'utility_compressor',
+ UTILITY_SUMMARIZER = 'utility_summarizer',
+ UTILITY_ROUTER = 'utility_router',
+ UTILITY_LOOP_DETECTOR = 'utility_loop_detector',
+ UTILITY_NEXT_SPEAKER = 'utility_next_speaker',
+ UTILITY_EDIT_CORRECTOR = 'utility_edit_corrector',
+ UTILITY_AUTOCOMPLETE = 'utility_autocomplete',
+}
diff --git a/packages/core/src/telemetry/loggers.test.ts b/packages/core/src/telemetry/loggers.test.ts
index 39b884148e..316cf0b33f 100644
--- a/packages/core/src/telemetry/loggers.test.ts
+++ b/packages/core/src/telemetry/loggers.test.ts
@@ -93,6 +93,7 @@ import {
EVENT_EXTENSION_UPDATE,
HookCallEvent,
EVENT_HOOK_CALL,
+ LlmRole,
} from './types.js';
import * as metrics from './metrics.js';
import { FileOperation } from './metrics.js';
@@ -520,6 +521,30 @@ describe('loggers', () => {
'event.timestamp': '2025-01-01T00:00:00.000Z',
});
});
+
+ it('should log an API response with a role', () => {
+ const event = new ApiResponseEvent(
+ 'test-model',
+ 100,
+ { prompt_id: 'prompt-id-role', contents: [] },
+ { candidates: [] },
+ AuthType.LOGIN_WITH_GOOGLE,
+ {},
+ 'test-response',
+ LlmRole.SUBAGENT,
+ );
+
+ logApiResponse(mockConfig, event);
+
+ expect(mockLogger.emit).toHaveBeenCalledWith({
+ body: 'API response from test-model. Status: 200. Duration: 100ms.',
+ attributes: expect.objectContaining({
+ 'event.name': EVENT_API_RESPONSE,
+ prompt_id: 'prompt-id-role',
+ role: 'subagent',
+ }),
+ });
+ });
});
describe('logApiError', () => {
@@ -654,6 +679,30 @@ describe('loggers', () => {
'event.timestamp': '2025-01-01T00:00:00.000Z',
});
});
+
+ it('should log an API error with a role', () => {
+ const event = new ApiErrorEvent(
+ 'test-model',
+ 'error',
+ 100,
+ { prompt_id: 'prompt-id-role', contents: [] },
+ AuthType.LOGIN_WITH_GOOGLE,
+ 'ApiError',
+ 503,
+ LlmRole.SUBAGENT,
+ );
+
+ logApiError(mockConfig, event);
+
+ expect(mockLogger.emit).toHaveBeenCalledWith({
+ body: 'API error for test-model. Error: error. Duration: 100ms.',
+ attributes: expect.objectContaining({
+ 'event.name': EVENT_API_ERROR,
+ prompt_id: 'prompt-id-role',
+ role: 'subagent',
+ }),
+ });
+ });
});
describe('logApiRequest', () => {
@@ -917,6 +966,26 @@ describe('loggers', () => {
}),
});
});
+
+ it('should log an API request with a role', () => {
+ const event = new ApiRequestEvent(
+ 'test-model',
+ { prompt_id: 'prompt-id-role', contents: [] },
+ 'request text',
+ LlmRole.SUBAGENT,
+ );
+
+ logApiRequest(mockConfig, event);
+
+ expect(mockLogger.emit).toHaveBeenCalledWith({
+ body: 'API request to test-model.',
+ attributes: expect.objectContaining({
+ 'event.name': EVENT_API_REQUEST,
+ prompt_id: 'prompt-id-role',
+ role: 'subagent',
+ }),
+ });
+ });
});
describe('logFlashFallback', () => {
diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts
index 54cca4f61f..497ff97469 100644
--- a/packages/core/src/telemetry/types.ts
+++ b/packages/core/src/telemetry/types.ts
@@ -41,6 +41,8 @@ import {
} from './semantic.js';
import { sanitizeHookName } from './sanitize.js';
import { getFileDiffFromResultDisplay } from '../utils/fileDiffUtils.js';
+import { LlmRole } from './llmRole.js';
+export { LlmRole };
export interface BaseTelemetryEvent {
'event.name': string;
@@ -375,17 +377,20 @@ export class ApiRequestEvent implements BaseTelemetryEvent {
model: string;
prompt: GenAIPromptDetails;
request_text?: string;
+ role?: LlmRole;
constructor(
model: string,
prompt_details: GenAIPromptDetails,
request_text?: string,
+ role?: LlmRole,
) {
this['event.name'] = 'api_request';
this['event.timestamp'] = new Date().toISOString();
this.model = model;
this.prompt = prompt_details;
this.request_text = request_text;
+ this.role = role;
}
toLogRecord(config: Config): LogRecord {
@@ -397,6 +402,9 @@ export class ApiRequestEvent implements BaseTelemetryEvent {
prompt_id: this.prompt.prompt_id,
request_text: this.request_text,
};
+ if (this.role) {
+ attributes['role'] = this.role;
+ }
return { body: `API request to ${this.model}.`, attributes };
}
@@ -445,6 +453,7 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
status_code?: number | string;
duration_ms: number;
auth_type?: string;
+ role?: LlmRole;
constructor(
model: string,
@@ -454,6 +463,7 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
auth_type?: string,
error_type?: string,
status_code?: number | string,
+ role?: LlmRole,
) {
this['event.name'] = 'api_error';
this['event.timestamp'] = new Date().toISOString();
@@ -464,6 +474,7 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
this.duration_ms = duration_ms;
this.prompt = prompt_details;
this.auth_type = auth_type;
+ this.role = role;
}
toLogRecord(config: Config): LogRecord {
@@ -482,6 +493,10 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
auth_type: this.auth_type,
};
+ if (this.role) {
+ attributes['role'] = this.role;
+ }
+
if (this.error_type) {
attributes['error.type'] = this.error_type;
}
@@ -590,6 +605,7 @@ export class ApiResponseEvent implements BaseTelemetryEvent {
response: GenAIResponseDetails;
usage: GenAIUsageDetails;
finish_reasons: OTelFinishReason[];
+ role?: LlmRole;
constructor(
model: string,
@@ -599,6 +615,7 @@ export class ApiResponseEvent implements BaseTelemetryEvent {
auth_type?: string,
usage_data?: GenerateContentResponseUsageMetadata,
response_text?: string,
+ role?: LlmRole,
) {
this['event.name'] = 'api_response';
this['event.timestamp'] = new Date().toISOString();
@@ -619,6 +636,7 @@ export class ApiResponseEvent implements BaseTelemetryEvent {
total_token_count: usage_data?.totalTokenCount ?? 0,
};
this.finish_reasons = toFinishReasons(this.response.candidates);
+ this.role = role;
}
toLogRecord(config: Config): LogRecord {
@@ -639,6 +657,9 @@ export class ApiResponseEvent implements BaseTelemetryEvent {
status_code: this.status_code,
finish_reasons: this.finish_reasons,
};
+ if (this.role) {
+ attributes['role'] = this.role;
+ }
if (this.response_text) {
attributes['response_text'] = this.response_text;
}
diff --git a/packages/core/src/telemetry/uiTelemetry.test.ts b/packages/core/src/telemetry/uiTelemetry.test.ts
index 825852f507..52f0911730 100644
--- a/packages/core/src/telemetry/uiTelemetry.test.ts
+++ b/packages/core/src/telemetry/uiTelemetry.test.ts
@@ -181,6 +181,7 @@ describe('UiTelemetryService', () => {
thoughts: 2,
tool: 3,
},
+ roles: {},
});
expect(service.getLastPromptTokenCount()).toBe(0);
});
@@ -236,6 +237,7 @@ describe('UiTelemetryService', () => {
thoughts: 6,
tool: 9,
},
+ roles: {},
});
expect(service.getLastPromptTokenCount()).toBe(0);
});
@@ -311,6 +313,7 @@ describe('UiTelemetryService', () => {
thoughts: 0,
tool: 0,
},
+ roles: {},
});
});
@@ -356,6 +359,35 @@ describe('UiTelemetryService', () => {
thoughts: 2,
tool: 3,
},
+ roles: {},
+ });
+ });
+
+ it('should update role metrics when processing an ApiErrorEvent with a role', () => {
+ const event = {
+ 'event.name': EVENT_API_ERROR,
+ model: 'gemini-2.5-pro',
+ duration_ms: 300,
+ error: 'Something went wrong',
+ role: 'utility_tool',
+ } as unknown as ApiErrorEvent & { 'event.name': typeof EVENT_API_ERROR };
+
+ service.addEvent(event);
+
+ const metrics = service.getMetrics();
+ expect(metrics.models['gemini-2.5-pro'].roles['utility_tool']).toEqual({
+ totalRequests: 1,
+ totalErrors: 1,
+ totalLatencyMs: 300,
+ tokens: {
+ input: 0,
+ prompt: 0,
+ candidates: 0,
+ total: 0,
+ cached: 0,
+ thoughts: 0,
+ tool: 0,
+ },
});
});
});
diff --git a/packages/core/src/telemetry/uiTelemetry.ts b/packages/core/src/telemetry/uiTelemetry.ts
index 6caf2a8606..8c9f2adb83 100644
--- a/packages/core/src/telemetry/uiTelemetry.ts
+++ b/packages/core/src/telemetry/uiTelemetry.ts
@@ -18,6 +18,8 @@ import type {
ToolCallEvent,
} from './types.js';
+import type { LlmRole } from './types.js';
+
export type UiEvent =
| (ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE })
| (ApiErrorEvent & { 'event.name': typeof EVENT_API_ERROR })
@@ -36,6 +38,21 @@ export interface ToolCallStats {
};
}
+export interface RoleMetrics {
+ totalRequests: number;
+ totalErrors: number;
+ totalLatencyMs: number;
+ tokens: {
+ input: number;
+ prompt: number;
+ candidates: number;
+ total: number;
+ cached: number;
+ thoughts: number;
+ tool: number;
+ };
+}
+
export interface ModelMetrics {
api: {
totalRequests: number;
@@ -51,6 +68,7 @@ export interface ModelMetrics {
thoughts: number;
tool: number;
};
+  roles: Partial<Record<LlmRole, RoleMetrics>>;
}
export interface SessionMetrics {
@@ -74,6 +92,21 @@ export interface SessionMetrics {
};
}
+const createInitialRoleMetrics = (): RoleMetrics => ({
+ totalRequests: 0,
+ totalErrors: 0,
+ totalLatencyMs: 0,
+ tokens: {
+ input: 0,
+ prompt: 0,
+ candidates: 0,
+ total: 0,
+ cached: 0,
+ thoughts: 0,
+ tool: 0,
+ },
+});
+
const createInitialModelMetrics = (): ModelMetrics => ({
api: {
totalRequests: 0,
@@ -89,6 +122,7 @@ const createInitialModelMetrics = (): ModelMetrics => ({
thoughts: 0,
tool: 0,
},
+ roles: {},
});
const createInitialMetrics = (): SessionMetrics => ({
@@ -177,6 +211,25 @@ export class UiTelemetryService extends EventEmitter {
0,
modelMetrics.tokens.prompt - modelMetrics.tokens.cached,
);
+
+ if (event.role) {
+ if (!modelMetrics.roles[event.role]) {
+ modelMetrics.roles[event.role] = createInitialRoleMetrics();
+ }
+ const roleMetrics = modelMetrics.roles[event.role]!;
+ roleMetrics.totalRequests++;
+ roleMetrics.totalLatencyMs += event.duration_ms;
+ roleMetrics.tokens.prompt += event.usage.input_token_count;
+ roleMetrics.tokens.candidates += event.usage.output_token_count;
+ roleMetrics.tokens.total += event.usage.total_token_count;
+ roleMetrics.tokens.cached += event.usage.cached_content_token_count;
+ roleMetrics.tokens.thoughts += event.usage.thoughts_token_count;
+ roleMetrics.tokens.tool += event.usage.tool_token_count;
+ roleMetrics.tokens.input = Math.max(
+ 0,
+ roleMetrics.tokens.prompt - roleMetrics.tokens.cached,
+ );
+ }
}
private processApiError(event: ApiErrorEvent) {
@@ -184,6 +237,16 @@ export class UiTelemetryService extends EventEmitter {
modelMetrics.api.totalRequests++;
modelMetrics.api.totalErrors++;
modelMetrics.api.totalLatencyMs += event.duration_ms;
+
+ if (event.role) {
+ if (!modelMetrics.roles[event.role]) {
+ modelMetrics.roles[event.role] = createInitialRoleMetrics();
+ }
+ const roleMetrics = modelMetrics.roles[event.role]!;
+ roleMetrics.totalRequests++;
+ roleMetrics.totalErrors++;
+ roleMetrics.totalLatencyMs += event.duration_ms;
+ }
}
private processToolCall(event: ToolCallEvent) {
diff --git a/packages/core/src/tools/web-fetch.ts b/packages/core/src/tools/web-fetch.ts
index 396b99a6de..41d4b7a09d 100644
--- a/packages/core/src/tools/web-fetch.ts
+++ b/packages/core/src/tools/web-fetch.ts
@@ -27,6 +27,7 @@ import {
logWebFetchFallbackAttempt,
WebFetchFallbackAttemptEvent,
} from '../telemetry/index.js';
+import { LlmRole } from '../telemetry/llmRole.js';
import { WEB_FETCH_TOOL_NAME } from './tool-names.js';
import { debugLogger } from '../utils/debugLogger.js';
import { retryWithBackoff } from '../utils/retry.js';
@@ -189,6 +190,7 @@ ${textContent}
{ model: 'web-fetch-fallback' },
[{ role: 'user', parts: [{ text: fallbackPrompt }] }],
signal,
+ LlmRole.UTILITY_TOOL,
);
const resultText = getResponseText(result) || '';
return {
@@ -278,6 +280,7 @@ ${textContent}
{ model: 'web-fetch' },
[{ role: 'user', parts: [{ text: userPrompt }] }],
signal, // Pass signal
+ LlmRole.UTILITY_TOOL,
);
debugLogger.debug(
diff --git a/packages/core/src/tools/web-search.ts b/packages/core/src/tools/web-search.ts
index b4a064c768..a5ac9937b8 100644
--- a/packages/core/src/tools/web-search.ts
+++ b/packages/core/src/tools/web-search.ts
@@ -17,6 +17,7 @@ import { getResponseText } from '../utils/partUtils.js';
import { debugLogger } from '../utils/debugLogger.js';
import { WEB_SEARCH_DEFINITION } from './definitions/coreTools.js';
import { resolveToolDeclaration } from './definitions/resolver.js';
+import { LlmRole } from '../telemetry/llmRole.js';
interface GroundingChunkWeb {
uri?: string;
@@ -86,6 +87,7 @@ class WebSearchToolInvocation extends BaseToolInvocation<
{ model: 'web-search' },
[{ role: 'user', parts: [{ text: this.params.query }] }],
signal,
+ LlmRole.UTILITY_TOOL,
);
const responseText = getResponseText(response);
diff --git a/packages/core/src/utils/editCorrector.ts b/packages/core/src/utils/editCorrector.ts
index d61628ee4f..e15be8cfc4 100644
--- a/packages/core/src/utils/editCorrector.ts
+++ b/packages/core/src/utils/editCorrector.ts
@@ -23,6 +23,7 @@ import * as fs from 'node:fs';
import { promptIdContext } from './promptIdContext.js';
import { debugLogger } from './debugLogger.js';
import { LRUCache } from 'mnemonist';
+import { LlmRole } from '../telemetry/types.js';
const CODE_CORRECTION_SYSTEM_PROMPT = `
You are an expert code-editing assistant. Your task is to analyze a failed edit attempt and provide a corrected version of the text snippets.
@@ -439,6 +440,7 @@ Return ONLY the corrected target snippet in the specified JSON format with the k
abortSignal,
systemInstruction: CODE_CORRECTION_SYSTEM_PROMPT,
promptId: getPromptId(),
+ role: LlmRole.UTILITY_EDIT_CORRECTOR,
});
if (
@@ -528,6 +530,7 @@ Return ONLY the corrected string in the specified JSON format with the key 'corr
abortSignal,
systemInstruction: CODE_CORRECTION_SYSTEM_PROMPT,
promptId: getPromptId(),
+ role: LlmRole.UTILITY_EDIT_CORRECTOR,
});
if (
@@ -598,6 +601,7 @@ Return ONLY the corrected string in the specified JSON format with the key 'corr
abortSignal,
systemInstruction: CODE_CORRECTION_SYSTEM_PROMPT,
promptId: getPromptId(),
+ role: LlmRole.UTILITY_EDIT_CORRECTOR,
});
if (
@@ -665,6 +669,7 @@ Return ONLY the corrected string in the specified JSON format with the key 'corr
abortSignal,
systemInstruction: CODE_CORRECTION_SYSTEM_PROMPT,
promptId: getPromptId(),
+ role: LlmRole.UTILITY_EDIT_CORRECTOR,
});
if (
diff --git a/packages/core/src/utils/llm-edit-fixer.ts b/packages/core/src/utils/llm-edit-fixer.ts
index 05cd1b3e55..15bfb39e28 100644
--- a/packages/core/src/utils/llm-edit-fixer.ts
+++ b/packages/core/src/utils/llm-edit-fixer.ts
@@ -10,6 +10,7 @@ import { type BaseLlmClient } from '../core/baseLlmClient.js';
import { LRUCache } from 'mnemonist';
import { getPromptIdWithFallback } from './promptIdContext.js';
import { debugLogger } from './debugLogger.js';
+import { LlmRole } from '../telemetry/types.js';
const MAX_CACHE_SIZE = 50;
const GENERATE_JSON_TIMEOUT_MS = 40000; // 40 seconds
@@ -181,6 +182,7 @@ export async function FixLLMEditWithInstruction(
systemInstruction: EDIT_SYS_PROMPT,
promptId,
maxAttempts: 1,
+ role: LlmRole.UTILITY_EDIT_CORRECTOR,
},
GENERATE_JSON_TIMEOUT_MS,
);
diff --git a/packages/core/src/utils/nextSpeakerChecker.ts b/packages/core/src/utils/nextSpeakerChecker.ts
index 39d9c37f7a..a5ce286feb 100644
--- a/packages/core/src/utils/nextSpeakerChecker.ts
+++ b/packages/core/src/utils/nextSpeakerChecker.ts
@@ -9,6 +9,7 @@ import type { BaseLlmClient } from '../core/baseLlmClient.js';
import type { GeminiChat } from '../core/geminiChat.js';
import { isFunctionResponse } from './messageInspectors.js';
import { debugLogger } from './debugLogger.js';
+import { LlmRole } from '../telemetry/types.js';
const CHECK_PROMPT = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
**Decision Rules (apply in order):**
@@ -116,6 +117,7 @@ export async function checkNextSpeaker(
schema: RESPONSE_SCHEMA,
abortSignal,
promptId,
+ role: LlmRole.UTILITY_NEXT_SPEAKER,
})) as unknown as NextSpeakerResponse;
if (
diff --git a/packages/core/src/utils/summarizer.ts b/packages/core/src/utils/summarizer.ts
index b25961e149..99653d4c59 100644
--- a/packages/core/src/utils/summarizer.ts
+++ b/packages/core/src/utils/summarizer.ts
@@ -11,6 +11,7 @@ import { getResponseText, partToString } from './partUtils.js';
import { debugLogger } from './debugLogger.js';
import type { ModelConfigKey } from '../services/modelConfigService.js';
import type { Config } from '../config/config.js';
+import { LlmRole } from '../telemetry/llmRole.js';
/**
* A function that summarizes the result of a tool execution.
@@ -94,6 +95,7 @@ export async function summarizeToolOutput(
modelConfigKey,
contents,
abortSignal,
+ LlmRole.UTILITY_SUMMARIZER,
);
return getResponseText(parsedResponse) || textToSummarize;
} catch (error) {