Refactor atCommandProcessor (#18461)

This commit is contained in:
Tommaso Sciortino
2026-02-06 12:49:11 -08:00
committed by GitHub
parent 7a8d6f6095
commit e844d4f45f
2 changed files with 349 additions and 320 deletions

View File

@@ -179,9 +179,6 @@ describe('handleAtCommand', () => {
expect(result).toEqual({
processedQuery: [{ text: queryWithSpaces }],
});
expect(mockOnDebugMessage).toHaveBeenCalledWith(
'Lone @ detected, will be treated as text in the modified query.',
);
});
it('should process a valid text file path', async () => {
@@ -441,9 +438,6 @@ describe('handleAtCommand', () => {
expect(mockOnDebugMessage).toHaveBeenCalledWith(
`Glob search for '**/*${invalidFile}*' found no files or an error. Path ${invalidFile} will be skipped.`,
);
expect(mockOnDebugMessage).toHaveBeenCalledWith(
'Lone @ detected, will be treated as text in the modified query.',
);
});
it('should return original query if all @paths are invalid or lone @', async () => {

View File

@@ -7,11 +7,7 @@
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import type { PartListUnion, PartUnion } from '@google/genai';
import type {
AnyToolInvocation,
Config,
DiscoveredMCPResource,
} from '@google/gemini-cli-core';
import type { AnyToolInvocation, Config } from '@google/gemini-cli-core';
import {
debugLogger,
getErrorMessage,
@@ -122,111 +118,74 @@ function parseAllAtCommands(query: string): AtCommandPart[] {
);
}
/**
* Processes user input containing one or more '@<path>' commands.
* - Workspace paths are read via the 'read_many_files' tool.
* - MCP resource URIs are read via each server's `resources/read`.
* The user query is updated with inline content blocks so the LLM receives the
* referenced context directly.
*
* @returns An object indicating whether the main hook should proceed with an
* LLM call and the processed query parts (including file/resource content).
*/
export async function handleAtCommand({
query,
config,
addItem,
onDebugMessage,
messageId: userMessageTimestamp,
signal,
}: HandleAtCommandParams): Promise<HandleAtCommandResult> {
function categorizeAtCommands(
commandParts: AtCommandPart[],
config: Config,
): {
agentParts: AtCommandPart[];
resourceParts: AtCommandPart[];
fileParts: AtCommandPart[];
} {
const agentParts: AtCommandPart[] = [];
const resourceParts: AtCommandPart[] = [];
const fileParts: AtCommandPart[] = [];
const agentRegistry = config.getAgentRegistry?.();
const resourceRegistry = config.getResourceRegistry();
const mcpClientManager = config.getMcpClientManager();
const commandParts = parseAllAtCommands(query);
const atPathCommandParts = commandParts.filter(
(part) => part.type === 'atPath',
);
for (const part of commandParts) {
if (part.type !== 'atPath' || part.content === '@') {
continue;
}
if (atPathCommandParts.length === 0) {
return { processedQuery: [{ text: query }] };
const name = part.content.substring(1);
if (agentRegistry?.getDefinition(name)) {
agentParts.push(part);
} else if (resourceRegistry.findResourceByUri(name)) {
resourceParts.push(part);
} else {
fileParts.push(part);
}
}
// Get centralized file discovery service
return { agentParts, resourceParts, fileParts };
}
interface ResolvedFile {
part: AtCommandPart;
pathSpec: string;
displayLabel: string;
absolutePath?: string;
}
interface IgnoredFile {
path: string;
reason: 'git' | 'gemini' | 'both';
}
/**
* Resolves file paths from @ commands, handling globs, recursion, and ignores.
*/
async function resolveFilePaths(
fileParts: AtCommandPart[],
config: Config,
onDebugMessage: (message: string) => void,
signal: AbortSignal,
): Promise<{ resolvedFiles: ResolvedFile[]; ignoredFiles: IgnoredFile[] }> {
const fileDiscovery = config.getFileService();
const respectFileIgnore = config.getFileFilteringOptions();
const pathSpecsToRead: string[] = [];
const resourceAttachments: DiscoveredMCPResource[] = [];
const atPathToResolvedSpecMap = new Map<string, string>();
const agentsFound: string[] = [];
const fileLabelsForDisplay: string[] = [];
const absoluteToRelativePathMap = new Map<string, string>();
const ignoredByReason: Record<string, string[]> = {
git: [],
gemini: [],
both: [],
};
const toolRegistry = config.getToolRegistry();
const readManyFilesTool = new ReadManyFilesTool(
config,
config.getMessageBus(),
);
const globTool = toolRegistry.getTool('glob');
if (!readManyFilesTool) {
addItem(
{ type: 'error', text: 'Error: read_many_files tool not found.' },
userMessageTimestamp,
);
return {
processedQuery: null,
error: 'Error: read_many_files tool not found.',
};
}
for (const atPathPart of atPathCommandParts) {
const originalAtPath = atPathPart.content; // e.g., "@file.txt" or "@"
if (originalAtPath === '@') {
onDebugMessage(
'Lone @ detected, will be treated as text in the modified query.',
);
continue;
}
const resolvedFiles: ResolvedFile[] = [];
const ignoredFiles: IgnoredFile[] = [];
for (const part of fileParts) {
const originalAtPath = part.content;
const pathName = originalAtPath.substring(1);
if (!pathName) {
// This case should ideally not be hit if parseAllAtCommands ensures content after @
// but as a safeguard:
const errMsg = `Error: Invalid @ command '${originalAtPath}'. No path specified.`;
addItem(
{
type: 'error',
text: errMsg,
},
userMessageTimestamp,
);
// Decide if this is a fatal error for the whole command or just skip this @ part
// For now, let's be strict and fail the command if one @path is malformed.
return { processedQuery: null, error: errMsg };
}
// Check if this is an Agent reference
const agentRegistry = config.getAgentRegistry?.();
if (agentRegistry?.getDefinition(pathName)) {
agentsFound.push(pathName);
atPathToResolvedSpecMap.set(originalAtPath, pathName);
continue;
}
// Check if this is an MCP resource reference (serverName:uri format)
const resourceMatch = resourceRegistry.findResourceByUri(pathName);
if (resourceMatch) {
resourceAttachments.push(resourceMatch);
atPathToResolvedSpecMap.set(originalAtPath, pathName);
continue;
}
@@ -257,7 +216,7 @@ export async function handleAtCommand({
if (gitIgnored || geminiIgnored) {
const reason =
gitIgnored && geminiIgnored ? 'both' : gitIgnored ? 'git' : 'gemini';
ignoredByReason[reason].push(pathName);
ignoredFiles.push({ path: pathName, reason });
const reasonText =
reason === 'both'
? 'ignored by both git and gemini'
@@ -269,33 +228,39 @@ export async function handleAtCommand({
}
for (const dir of config.getWorkspaceContext().getDirectories()) {
let currentPathSpec = pathName;
let resolvedSuccessfully = false;
let relativePath = pathName;
try {
const absolutePath = path.isAbsolute(pathName)
? pathName
: path.resolve(dir, pathName);
const stats = await fs.stat(absolutePath);
// Convert absolute path to relative path
relativePath = path.isAbsolute(pathName)
const relativePath = path.isAbsolute(pathName)
? path.relative(dir, absolutePath)
: pathName;
if (stats.isDirectory()) {
currentPathSpec = path.join(relativePath, '**');
const pathSpec = path.join(relativePath, '**');
resolvedFiles.push({
part,
pathSpec,
displayLabel: path.isAbsolute(pathName) ? relativePath : pathName,
absolutePath,
});
onDebugMessage(
`Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`,
`Path ${pathName} resolved to directory, using glob: ${pathSpec}`,
);
} else {
currentPathSpec = relativePath;
absoluteToRelativePathMap.set(absolutePath, relativePath);
resolvedFiles.push({
part,
pathSpec: relativePath,
displayLabel: path.isAbsolute(pathName) ? relativePath : pathName,
absolutePath,
});
onDebugMessage(
`Path ${pathName} resolved to file: ${absolutePath}, using relative path: ${relativePath}`,
);
}
resolvedSuccessfully = true;
break;
} catch (error) {
if (isNodeError(error) && error.code === 'ENOENT') {
if (config.getEnableRecursiveFileSearch() && globTool) {
@@ -319,15 +284,18 @@ export async function handleAtCommand({
const lines = globResult.llmContent.split('\n');
if (lines.length > 1 && lines[1]) {
const firstMatchAbsolute = lines[1].trim();
currentPathSpec = path.relative(dir, firstMatchAbsolute);
absoluteToRelativePathMap.set(
firstMatchAbsolute,
currentPathSpec,
);
const pathSpec = path.relative(dir, firstMatchAbsolute);
resolvedFiles.push({
part,
pathSpec,
displayLabel: path.isAbsolute(pathName)
? pathSpec
: pathName,
});
onDebugMessage(
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`,
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${pathSpec}`,
);
resolvedSuccessfully = true;
break;
} else {
onDebugMessage(
`Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`,
@@ -360,112 +328,67 @@ export async function handleAtCommand({
);
}
}
if (resolvedSuccessfully) {
pathSpecsToRead.push(currentPathSpec);
atPathToResolvedSpecMap.set(originalAtPath, currentPathSpec);
const displayPath = path.isAbsolute(pathName) ? relativePath : pathName;
fileLabelsForDisplay.push(displayPath);
break;
}
}
}
// Construct the initial part of the query for the LLM
let initialQueryText = '';
return { resolvedFiles, ignoredFiles };
}
/**
* Rebuilds the user query, replacing @ commands with their resolved path specs or agent/resource names.
*/
function constructInitialQuery(
commandParts: AtCommandPart[],
resolvedFiles: ResolvedFile[],
): string {
const replacementMap = new Map<AtCommandPart, string>();
for (const rf of resolvedFiles) {
replacementMap.set(rf.part, rf.pathSpec);
}
let result = '';
for (let i = 0; i < commandParts.length; i++) {
const part = commandParts[i];
if (part.type === 'text') {
initialQueryText += part.content;
} else {
// type === 'atPath'
const resolvedSpec = atPathToResolvedSpecMap.get(part.content);
if (
i > 0 &&
initialQueryText.length > 0 &&
!initialQueryText.endsWith(' ')
) {
// Add space if previous part was text and didn't end with space, or if previous was @path
const prevPart = commandParts[i - 1];
if (
prevPart.type === 'text' ||
(prevPart.type === 'atPath' &&
atPathToResolvedSpecMap.has(prevPart.content))
) {
initialQueryText += ' ';
}
}
if (resolvedSpec) {
initialQueryText += `@${resolvedSpec}`;
} else {
// If not resolved for reading (e.g. lone @ or invalid path that was skipped),
// add the original @-string back, ensuring spacing if it's not the first element.
if (
i > 0 &&
initialQueryText.length > 0 &&
!initialQueryText.endsWith(' ') &&
!part.content.startsWith(' ')
) {
initialQueryText += ' ';
}
initialQueryText += part.content;
let content = part.content;
if (part.type === 'atPath') {
const resolved = replacementMap.get(part);
content = resolved ? `@${resolved}` : part.content;
if (i > 0 && result.length > 0 && !result.endsWith(' ')) {
result += ' ';
}
}
result += content;
}
initialQueryText = initialQueryText.trim();
return result.trim();
}
// Inform user about ignored paths
const totalIgnored =
ignoredByReason['git'].length +
ignoredByReason['gemini'].length +
ignoredByReason['both'].length;
/**
* Reads content from MCP resources.
*/
async function readMcpResources(
resourceParts: AtCommandPart[],
config: Config,
): Promise<{
parts: PartUnion[];
displays: IndividualToolCallDisplay[];
error?: string;
}> {
const resourceRegistry = config.getResourceRegistry();
const mcpClientManager = config.getMcpClientManager();
const parts: PartUnion[] = [];
const displays: IndividualToolCallDisplay[] = [];
if (totalIgnored > 0) {
const messages = [];
if (ignoredByReason['git'].length) {
messages.push(`Git-ignored: ${ignoredByReason['git'].join(', ')}`);
}
if (ignoredByReason['gemini'].length) {
messages.push(`Gemini-ignored: ${ignoredByReason['gemini'].join(', ')}`);
}
if (ignoredByReason['both'].length) {
messages.push(`Ignored by both: ${ignoredByReason['both'].join(', ')}`);
const resourcePromises = resourceParts.map(async (part) => {
const uri = part.content.substring(1);
const resource = resourceRegistry.findResourceByUri(uri);
if (!resource) {
// Should not happen as it was categorized as a resource
return { success: false, parts: [], uri };
}
const message = `Ignored ${totalIgnored} files:\n${messages.join('\n')}`;
debugLogger.log(message);
onDebugMessage(message);
}
// Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText
if (
pathSpecsToRead.length === 0 &&
resourceAttachments.length === 0 &&
agentsFound.length === 0
) {
onDebugMessage('No valid file paths found in @ commands to read.');
if (initialQueryText === '@' && query.trim() === '@') {
// If the only thing was a lone @, pass original query (which might have spaces)
return { processedQuery: [{ text: query }] };
} else if (!initialQueryText && query) {
// If all @-commands were invalid and no surrounding text, pass original query
return { processedQuery: [{ text: query }] };
}
// Otherwise, proceed with the (potentially modified) query text that doesn't involve file reading
return { processedQuery: [{ text: initialQueryText || query }] };
}
const processedQueryParts: PartListUnion = [{ text: initialQueryText }];
if (agentsFound.length > 0) {
const toolsList = agentsFound.map((agent) => `'${agent}'`).join(', ');
const agentNudge = `\n<system_note>\nThe user has explicitly selected the following agent(s): ${agentsFound.join(
', ',
)}. Please use the following tool(s) to delegate the task: ${toolsList}.\n</system_note>\n`;
processedQueryParts.push({ text: agentNudge });
}
const resourcePromises = resourceAttachments.map(async (resource) => {
const uri = resource.uri;
const client = mcpClientManager?.getClient(resource.serverName);
try {
if (!client) {
@@ -473,18 +396,18 @@ export async function handleAtCommand({
`MCP client for server '${resource.serverName}' is not available or not connected.`,
);
}
const response = await client.readResource(uri);
const parts = convertResourceContentsToParts(response);
const response = await client.readResource(resource.uri);
const resourceParts = convertResourceContentsToParts(response);
return {
success: true,
parts,
uri,
parts: resourceParts,
uri: resource.uri,
display: {
callId: `mcp-resource-${resource.serverName}-${uri}`,
callId: `mcp-resource-${resource.serverName}-${resource.uri}`,
name: `resources/read (${resource.serverName})`,
description: uri,
description: resource.uri,
status: ToolCallStatus.Success,
resultDisplay: `Successfully read resource ${uri}`,
resultDisplay: `Successfully read resource ${resource.uri}`,
confirmationDetails: undefined,
} as IndividualToolCallDisplay,
};
@@ -492,13 +415,13 @@ export async function handleAtCommand({
return {
success: false,
parts: [],
uri,
uri: resource.uri,
display: {
callId: `mcp-resource-${resource.serverName}-${uri}`,
callId: `mcp-resource-${resource.serverName}-${resource.uri}`,
name: `resources/read (${resource.serverName})`,
description: uri,
description: resource.uri,
status: ToolCallStatus.Error,
resultDisplay: `Error reading resource ${uri}: ${getErrorMessage(error)}`,
resultDisplay: `Error reading resource ${resource.uri}: ${getErrorMessage(error)}`,
confirmationDetails: undefined,
} as IndividualToolCallDisplay,
};
@@ -506,77 +429,71 @@ export async function handleAtCommand({
});
const resourceResults = await Promise.all(resourcePromises);
const resourceReadDisplays: IndividualToolCallDisplay[] = [];
let resourceErrorOccurred = false;
let hasAddedReferenceHeader = false;
let hasError = false;
for (const result of resourceResults) {
resourceReadDisplays.push(result.display);
if (result.display) {
displays.push(result.display);
}
if (result.success) {
if (!hasAddedReferenceHeader) {
processedQueryParts.push({
text: REF_CONTENT_HEADER,
});
hasAddedReferenceHeader = true;
}
processedQueryParts.push({ text: `\nContent from @${result.uri}:\n` });
processedQueryParts.push(...result.parts);
parts.push({ text: `\nContent from @${result.uri}:\n` });
parts.push(...result.parts);
} else {
resourceErrorOccurred = true;
hasError = true;
}
}
if (resourceErrorOccurred) {
addItem(
{ type: 'tool_group', tools: resourceReadDisplays } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
// Find the first error to report
const firstError = resourceReadDisplays.find(
(d) => d.status === ToolCallStatus.Error,
)!;
const errorMessages = resourceReadDisplays
.filter((d) => d.status === ToolCallStatus.Error)
.map((d) => d.resultDisplay);
debugLogger.error(errorMessages);
const errorMsg = `Exiting due to an error processing the @ command: ${firstError.resultDisplay}`;
return { processedQuery: null, error: errorMsg };
if (hasError) {
const firstError = displays.find((d) => d.status === ToolCallStatus.Error);
return {
parts: [],
displays,
error: `Exiting due to an error processing the @ command: ${firstError?.resultDisplay}`,
};
}
if (pathSpecsToRead.length === 0) {
if (resourceReadDisplays.length > 0) {
addItem(
{ type: 'tool_group', tools: resourceReadDisplays } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
}
if (hasAddedReferenceHeader) {
processedQueryParts.push({ text: REF_CONTENT_FOOTER });
}
return { processedQuery: processedQueryParts };
return { parts, displays };
}
/**
* Reads content from local files using the ReadManyFilesTool.
*/
async function readLocalFiles(
resolvedFiles: ResolvedFile[],
config: Config,
signal: AbortSignal,
userMessageTimestamp: number,
): Promise<{
parts: PartUnion[];
display?: IndividualToolCallDisplay;
error?: string;
}> {
if (resolvedFiles.length === 0) {
return { parts: [] };
}
const readManyFilesTool = new ReadManyFilesTool(
config,
config.getMessageBus(),
);
const pathSpecsToRead = resolvedFiles.map((rf) => rf.pathSpec);
const fileLabelsForDisplay = resolvedFiles.map((rf) => rf.displayLabel);
const respectFileIgnore = config.getFileFilteringOptions();
const toolArgs = {
include: pathSpecsToRead,
file_filtering_options: {
respect_git_ignore: respectFileIgnore.respectGitIgnore,
respect_gemini_ignore: respectFileIgnore.respectGeminiIgnore,
},
// Use configuration setting
};
let readManyFilesDisplay: IndividualToolCallDisplay | undefined;
let invocation: AnyToolInvocation | undefined = undefined;
try {
invocation = readManyFilesTool.build(toolArgs);
const result = await invocation.execute(signal);
readManyFilesDisplay = {
const display: IndividualToolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description: invocation.getDescription(),
@@ -587,14 +504,9 @@ export async function handleAtCommand({
confirmationDetails: undefined,
};
const parts: PartUnion[] = [];
if (Array.isArray(result.llmContent)) {
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
if (!hasAddedReferenceHeader) {
processedQueryParts.push({
text: REF_CONTENT_HEADER,
});
hasAddedReferenceHeader = true;
}
for (const part of result.llmContent) {
if (typeof part === 'string') {
const match = fileContentRegex.exec(part);
@@ -602,12 +514,17 @@ export async function handleAtCommand({
const filePathSpecInContent = match[1];
const fileActualContent = match[2].trim();
let displayPath = absoluteToRelativePathMap.get(
filePathSpecInContent,
// Find the display label for this path
const resolvedFile = resolvedFiles.find(
(rf) =>
rf.absolutePath === filePathSpecInContent ||
rf.pathSpec === filePathSpecInContent,
);
// Fallback: if no mapping found, try to convert absolute path to relative
let displayPath = resolvedFile?.displayLabel;
if (!displayPath) {
// Fallback: if no mapping found, try to convert absolute path to relative
for (const dir of config.getWorkspaceContext().getDirectories()) {
if (filePathSpecInContent.startsWith(dir)) {
displayPath = path.relative(dir, filePathSpecInContent);
@@ -618,39 +535,22 @@ export async function handleAtCommand({
displayPath = displayPath || filePathSpecInContent;
processedQueryParts.push({
parts.push({
text: `\nContent from @${displayPath}:\n`,
});
processedQueryParts.push({ text: fileActualContent });
parts.push({ text: fileActualContent });
} else {
processedQueryParts.push({ text: part });
parts.push({ text: part });
}
} else {
// part is a Part object.
processedQueryParts.push(part);
parts.push(part);
}
}
} else {
onDebugMessage(
'read_many_files tool returned no content or empty content.',
);
}
if (resourceReadDisplays.length > 0 || readManyFilesDisplay) {
addItem(
{
type: 'tool_group',
tools: [
...resourceReadDisplays,
...(readManyFilesDisplay ? [readManyFilesDisplay] : []),
],
} as Omit<HistoryItem, 'id'>,
userMessageTimestamp,
);
}
return { processedQuery: processedQueryParts };
return { parts, display };
} catch (error: unknown) {
readManyFilesDisplay = {
const errorDisplay: IndividualToolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description:
@@ -660,18 +560,153 @@ export async function handleAtCommand({
resultDisplay: `Error reading files (${fileLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
};
return {
parts: [],
display: errorDisplay,
error: `Exiting due to an error processing the @ command: ${errorDisplay.resultDisplay}`,
};
}
}
/**
* Reports ignored files to the debug log and debug message callback.
*/
function reportIgnoredFiles(
  ignoredFiles: IgnoredFile[],
  onDebugMessage: (message: string) => void,
): void {
  const totalIgnored = ignoredFiles.length;
  if (totalIgnored === 0) {
    return;
  }
  // Group the skipped paths by why they were ignored so the summary
  // reads as one line per ignore source. Keying the record by the
  // IgnoredFile['reason'] union (instead of a broad string index)
  // lets the compiler verify every reason bucket exists.
  const ignoredByReason: Record<IgnoredFile['reason'], string[]> = {
    git: [],
    gemini: [],
    both: [],
  };
  for (const file of ignoredFiles) {
    ignoredByReason[file.reason].push(file.path);
  }
  const messages: string[] = [];
  if (ignoredByReason['git'].length) {
    messages.push(`Git-ignored: ${ignoredByReason['git'].join(', ')}`);
  }
  if (ignoredByReason['gemini'].length) {
    messages.push(`Gemini-ignored: ${ignoredByReason['gemini'].join(', ')}`);
  }
  if (ignoredByReason['both'].length) {
    messages.push(`Ignored by both: ${ignoredByReason['both'].join(', ')}`);
  }
  // Surface the summary both to the structured debug log and to the
  // caller-supplied debug-message callback (shown in the CLI UI).
  const message = `Ignored ${totalIgnored} files:\n${messages.join('\n')}`;
  debugLogger.log(message);
  onDebugMessage(message);
}
/**
* Processes user input containing one or more '@<path>' commands.
* - Workspace paths are read via the 'read_many_files' tool.
* - MCP resource URIs are read via each server's `resources/read`.
* The user query is updated with inline content blocks so the LLM receives the
* referenced context directly.
*
* @returns An object indicating whether the main hook should proceed with an
* LLM call and the processed query parts (including file/resource content).
*/
export async function handleAtCommand({
query,
config,
addItem,
onDebugMessage,
messageId: userMessageTimestamp,
signal,
}: HandleAtCommandParams): Promise<HandleAtCommandResult> {
const commandParts = parseAllAtCommands(query);
const { agentParts, resourceParts, fileParts } = categorizeAtCommands(
commandParts,
config,
);
const { resolvedFiles, ignoredFiles } = await resolveFilePaths(
fileParts,
config,
onDebugMessage,
signal,
);
reportIgnoredFiles(ignoredFiles, onDebugMessage);
if (
resolvedFiles.length === 0 &&
resourceParts.length === 0 &&
agentParts.length === 0
) {
onDebugMessage(
'No valid file paths, resources, or agents found in @ commands.',
);
return { processedQuery: [{ text: query }] };
}
const initialQueryText = constructInitialQuery(commandParts, resolvedFiles);
const processedQueryParts: PartListUnion = [{ text: initialQueryText }];
if (agentParts.length > 0) {
const agentNames = agentParts.map((p) => p.content.substring(1));
const toolsList = agentNames.map((agent) => `'${agent}'`).join(', ');
const agentNudge = `\n<system_note>\nThe user has explicitly selected the following agent(s): ${agentNames.join(
', ',
)}. Please use the following tool(s) to delegate the task: ${toolsList}.\n</system_note>\n`;
processedQueryParts.push({ text: agentNudge });
}
const [mcpResult, fileResult] = await Promise.all([
readMcpResources(resourceParts, config),
readLocalFiles(resolvedFiles, config, signal, userMessageTimestamp),
]);
const hasContent = mcpResult.parts.length > 0 || fileResult.parts.length > 0;
if (hasContent) {
processedQueryParts.push({ text: REF_CONTENT_HEADER });
processedQueryParts.push(...mcpResult.parts);
processedQueryParts.push(...fileResult.parts);
// Only add footer if we didn't read local files (because ReadManyFilesTool adds it)
// AND we read MCP resources (so we need to close the block).
if (fileResult.parts.length === 0 && mcpResult.parts.length > 0) {
processedQueryParts.push({ text: REF_CONTENT_FOOTER });
}
}
const allDisplays = [
...mcpResult.displays,
...(fileResult.display ? [fileResult.display] : []),
];
if (allDisplays.length > 0) {
addItem(
{
type: 'tool_group',
tools: [...resourceReadDisplays, readManyFilesDisplay],
tools: allDisplays,
} as Omit<HistoryItem, 'id'>,
userMessageTimestamp,
);
return {
processedQuery: null,
error: `Exiting due to an error processing the @ command: ${readManyFilesDisplay.resultDisplay}`,
};
}
if (mcpResult.error) {
debugLogger.error(mcpResult.error);
return { processedQuery: null, error: mcpResult.error };
}
if (fileResult.error) {
debugLogger.error(fileResult.error);
return { processedQuery: null, error: fileResult.error };
}
return { processedQuery: processedQueryParts };
}
function convertResourceContentsToParts(response: {
@@ -686,20 +721,20 @@ function convertResourceContentsToParts(response: {
};
}>;
}): PartUnion[] {
const parts: PartUnion[] = [];
for (const content of response.contents ?? []) {
return (response.contents ?? []).flatMap((content) => {
const candidate = content.resource ?? content;
if (candidate.text) {
parts.push({ text: candidate.text });
continue;
return [{ text: candidate.text }];
}
if (candidate.blob) {
const sizeBytes = Buffer.from(candidate.blob, 'base64').length;
const mimeType = candidate.mimeType ?? 'application/octet-stream';
parts.push({
text: `[Binary resource content ${mimeType}, ${sizeBytes} bytes]`,
});
return [
{
text: `[Binary resource content ${mimeType}, ${sizeBytes} bytes]`,
},
];
}
}
return parts;
return [];
});
}