Starting to modularize into separate cli / server packages. (#55)

* Starting to move a lot of code into packages/server

* More of the massive refactor, builds and runs, some issues though.

* Fixing outstanding issue with double messages.

* Fixing a minor UI issue.

* Fixing the build post-merge.

* Running formatting.

* Addressing comments.
This commit is contained in:
Evan Senter
2025-04-19 19:45:42 +01:00
committed by GitHub
parent 0c9e1ef61b
commit 3fce6cea27
46 changed files with 3946 additions and 3403 deletions
+31 -73
View File
@@ -6,61 +6,20 @@
import yargs from 'yargs/yargs';
import { hideBin } from 'yargs/helpers';
import * as dotenv from 'dotenv';
import * as fs from 'node:fs';
import * as path from 'node:path';
import process from 'node:process';
// Import server config logic
import {
Config,
loadEnvironment,
createServerConfig,
} from '@gemini-code/server';
const DEFAULT_GEMINI_MODEL = 'gemini-2.5-flash-preview-04-17';
export class Config {
private apiKey: string;
private model: string;
private targetDir: string;
constructor(apiKey: string, model: string, targetDir: string) {
this.apiKey = apiKey;
this.model = model;
this.targetDir = targetDir;
}
getApiKey(): string {
return this.apiKey;
}
getModel(): string {
return this.model;
}
getTargetDir(): string {
return this.targetDir;
}
}
export function loadConfig(): Config {
loadEnvironment();
if (!process.env.GEMINI_API_KEY) {
console.log(
'GEMINI_API_KEY is not set. See https://ai.google.dev/gemini-api/docs/api-key to obtain one. ' +
'Please set it in your .env file or as an environment variable.',
);
process.exit(1);
}
const argv = parseArguments();
return new Config(
process.env.GEMINI_API_KEY,
argv.model || DEFAULT_GEMINI_MODEL,
argv.target_dir || process.cwd(),
);
}
export const globalConfig = loadConfig(); // TODO(jbd): Remove global state.
// Keep CLI-specific argument parsing
interface CliArgs {
target_dir: string | undefined;
model: string | undefined;
// Add other expected args here if needed
// e.g., verbose?: boolean;
}
function parseArguments(): CliArgs {
@@ -79,35 +38,34 @@ function parseArguments(): CliArgs {
})
.help()
.alias('h', 'help')
.strict().argv; // Keep strict mode to error on unknown options
// Cast to the interface to ensure the structure aligns with expectations
// Use `unknown` first for safer casting if types might not perfectly match
.strict().argv;
return argv as unknown as CliArgs;
}
function findEnvFile(startDir: string): string | null {
// Start search from the provided directory (e.g., current working directory)
let currentDir = path.resolve(startDir); // Ensure absolute path
while (true) {
const envPath = path.join(currentDir, '.env');
if (fs.existsSync(envPath)) {
return envPath;
}
// Renamed function for clarity
export function loadCliConfig(): Config {
// Load .env file using logic from server package
loadEnvironment();
const parentDir = path.dirname(currentDir);
if (parentDir === currentDir || !parentDir) {
return null;
}
currentDir = parentDir;
// Check API key (CLI responsibility)
if (!process.env.GEMINI_API_KEY) {
console.log(
'GEMINI_API_KEY is not set. See https://ai.google.dev/gemini-api/docs/api-key to obtain one. ' +
'Please set it in your .env file or as an environment variable.',
);
process.exit(1);
}
// Parse CLI arguments
const argv = parseArguments();
// Create config using factory from server package
return createServerConfig(
process.env.GEMINI_API_KEY,
argv.model || DEFAULT_GEMINI_MODEL,
argv.target_dir || process.cwd(),
);
}
function loadEnvironment(): void {
// Start searching from the current working directory by default
const envFilePath = findEnvFile(process.cwd());
if (!envFilePath) {
return;
}
dotenv.config({ path: envFilePath });
}
// The globalConfig export is problematic, CLI entry point (gemini.ts) should call loadCliConfig
// export const globalConfig = loadCliConfig(); // Remove or replace global export
-165
View File
@@ -1,165 +0,0 @@
import { describe, it, expect, vi, beforeEach, Mock } from 'vitest';
import { GoogleGenAI, Type, Content } from '@google/genai';
import { GeminiClient } from './gemini-client.js';
import { Config } from '../config/config.js';
// Mock the entire @google/genai module
vi.mock('@google/genai');
// Mock the Config class and its methods
vi.mock('../config/config.js', () => {
// The mock constructor should accept the arguments but not explicitly return an object.
// vi.fn() will create a mock instance that inherits from the prototype.
const MockConfig = vi.fn();
// Methods are mocked on the prototype, so instances will inherit them.
MockConfig.prototype.getApiKey = vi.fn(() => 'mock-api-key');
MockConfig.prototype.getModel = vi.fn(() => 'mock-model');
MockConfig.prototype.getTargetDir = vi.fn(() => 'mock-target-dir');
return { Config: MockConfig };
});
// Define a type for the mocked GoogleGenAI instance structure
type MockGoogleGenAIType = {
models: {
generateContent: Mock;
};
chats: {
create: Mock;
};
};
describe('GeminiClient', () => {
// Use the specific types defined above
let mockGenerateContent: MockGoogleGenAIType['models']['generateContent'];
let mockGoogleGenAIInstance: MockGoogleGenAIType;
let config: Config;
let client: GeminiClient;
beforeEach(() => {
vi.clearAllMocks();
// Mock the generateContent method specifically
mockGenerateContent = vi.fn();
// Mock the chainable structure ai.models.generateContent
mockGoogleGenAIInstance = {
models: {
generateContent: mockGenerateContent,
},
chats: {
create: vi.fn(), // Mock create as well
},
};
// Configure the mocked GoogleGenAI constructor to return our mock instance
(GoogleGenAI as Mock).mockImplementation(() => mockGoogleGenAIInstance);
config = new Config('mock-api-key-arg', 'mock-model-arg', 'mock-dir-arg');
client = new GeminiClient(config);
});
describe('generateJson', () => {
it('should call ai.models.generateContent with correct parameters', async () => {
const mockContents: Content[] = [
{ role: 'user', parts: [{ text: 'test prompt' }] },
];
const mockSchema = {
type: Type.OBJECT,
properties: { key: { type: Type.STRING } },
};
const mockApiResponse = { text: JSON.stringify({ key: 'value' }) };
mockGenerateContent.mockResolvedValue(mockApiResponse);
await client.generateJson(mockContents, mockSchema);
expect(mockGenerateContent).toHaveBeenCalledTimes(1);
// Use expect.objectContaining for the config assertion
const expectedConfigMatcher = expect.objectContaining({
temperature: 0,
topP: 1,
systemInstruction: expect.any(String),
responseSchema: mockSchema,
responseMimeType: 'application/json',
});
expect(mockGenerateContent).toHaveBeenCalledWith({
model: 'mock-model',
config: expectedConfigMatcher,
contents: mockContents,
});
});
it('should return the parsed JSON response', async () => {
const mockContents: Content[] = [
{ role: 'user', parts: [{ text: 'test prompt' }] },
];
const mockSchema = {
type: Type.OBJECT,
properties: { key: { type: Type.STRING } },
};
const expectedJson = { key: 'value' };
const mockApiResponse = { text: JSON.stringify(expectedJson) };
mockGenerateContent.mockResolvedValue(mockApiResponse);
const result = await client.generateJson(mockContents, mockSchema);
expect(result).toEqual(expectedJson);
});
it('should throw an error if API returns empty response', async () => {
const mockContents: Content[] = [
{ role: 'user', parts: [{ text: 'test prompt' }] },
];
const mockSchema = {
type: Type.OBJECT,
properties: { key: { type: Type.STRING } },
};
const mockApiResponse = { text: '' }; // Empty response
mockGenerateContent.mockResolvedValue(mockApiResponse);
await expect(
client.generateJson(mockContents, mockSchema),
).rejects.toThrow(
'Failed to generate JSON content: API returned an empty response.',
);
});
it('should throw an error if API response is not valid JSON', async () => {
const mockContents: Content[] = [
{ role: 'user', parts: [{ text: 'test prompt' }] },
];
const mockSchema = {
type: Type.OBJECT,
properties: { key: { type: Type.STRING } },
};
const mockApiResponse = { text: 'invalid json' }; // Invalid JSON
mockGenerateContent.mockResolvedValue(mockApiResponse);
await expect(
client.generateJson(mockContents, mockSchema),
).rejects.toThrow('Failed to parse API response as JSON:');
});
it('should throw an error if generateContent rejects', async () => {
const mockContents: Content[] = [
{ role: 'user', parts: [{ text: 'test prompt' }] },
];
const mockSchema = {
type: Type.OBJECT,
properties: { key: { type: Type.STRING } },
};
const apiError = new Error('API call failed');
mockGenerateContent.mockRejectedValue(apiError);
await expect(
client.generateJson(mockContents, mockSchema),
).rejects.toThrow(`Failed to generate JSON content: ${apiError.message}`);
});
});
// TODO: Add tests for startChat and sendMessageStream later
});
-282
View File
@@ -1,282 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
GenerateContentConfig,
GoogleGenAI,
Part,
Chat,
Type,
SchemaUnion,
PartListUnion,
Content,
} from '@google/genai';
import { CoreSystemPrompt } from './prompts.js';
import process from 'node:process';
import { toolRegistry } from '../tools/tool-registry.js';
import { getFolderStructure } from '../utils/getFolderStructure.js';
import { GeminiEventType, GeminiStream } from './gemini-stream.js';
import { Config } from '../config/config.js';
import { Turn } from './turn.js';
export class GeminiClient {
private config: Config;
private ai: GoogleGenAI;
private generateContentConfig: GenerateContentConfig = {
temperature: 0,
topP: 1,
};
private readonly MAX_TURNS = 100;
constructor(config: Config) {
this.config = config;
this.ai = new GoogleGenAI({ apiKey: config.getApiKey() });
}
private async getEnvironment(): Promise<Part> {
const cwd = process.cwd();
const today = new Date().toLocaleDateString(undefined, {
weekday: 'long',
year: 'numeric',
month: 'long',
day: 'numeric',
});
const platform = process.platform;
const folderStructure = await getFolderStructure(cwd);
const context = `
Okay, just setting up the context for our chat.
Today is ${today}.
My operating system is: ${platform}
I'm currently working in the directory: ${cwd}
${folderStructure}
`.trim();
return { text: context };
}
async startChat(): Promise<Chat> {
const envPart = await this.getEnvironment();
const model = this.config.getModel();
const tools = toolRegistry.getToolSchemas();
try {
const chat = this.ai.chats.create({
model,
config: {
systemInstruction: CoreSystemPrompt,
...this.generateContentConfig,
tools,
},
history: [
// --- Add the context as a single part in the initial user message ---
{
role: 'user',
parts: [envPart], // Pass the single Part object in an array
},
// --- Add an empty model response to balance the history ---
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }], // A slightly more conversational model response
},
// --- End history modification ---
],
});
return chat;
} catch (error) {
console.error('Error initializing Gemini chat session:', error);
const message = error instanceof Error ? error.message : 'Unknown error.';
throw new Error(`Failed to initialize chat: ${message}`);
}
}
async *sendMessageStream(
chat: Chat,
request: PartListUnion,
signal?: AbortSignal,
): GeminiStream {
let turns = 0;
try {
while (turns < this.MAX_TURNS) {
turns++;
// A turn either yields a text response or returns
// function responses to be used in the next turn.
// This callsite is responsible to handle the buffered
// function responses and use it on the next turn.
const turn = new Turn(chat);
const resultStream = turn.run(request, signal);
for await (const event of resultStream) {
yield event;
}
const fnResponses = turn.getFunctionResponses();
if (fnResponses.length > 0) {
request = fnResponses;
continue; // use the responses in the next turn
}
const history = chat.getHistory();
const checkPrompt = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
**Decision Rules (apply in order):**
1. **Model Continues:** If your last response explicitly states an immediate next action *you* intend to take (e.g., "Next, I will...", "Now I'll process...", "Moving on to analyze...", indicates an intended tool call that didn't execute), OR if the response seems clearly incomplete (cut off mid-thought without a natural conclusion), then the **'model'** should speak next.
2. **Question to User:** If your last response ends with a direct question specifically addressed *to the user*, then the **'user'** should speak next.
3. **Waiting for User:** If your last response completed a thought, statement, or task *and* does not meet the criteria for Rule 1 (Model Continues) or Rule 2 (Question to User), it implies a pause expecting user input or reaction. In this case, the **'user'** should speak next.
**Output Format:**
Respond *only* in JSON format according to the following schema. Do not include any text outside the JSON structure.
\`\`\`json
{
"type": "object",
"properties": {
"reasoning": {
"type": "string",
"description": "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
},
"next_speaker": {
"type": "string",
"enum": ["user", "model"],
"description": "Who should speak next based *only* on the preceding turn and the decision rules."
}
},
"required": ["next_speaker", "reasoning"]
\`\`\`
}`;
// Schema Idea
const responseSchema: SchemaUnion = {
type: Type.OBJECT,
properties: {
reasoning: {
type: Type.STRING,
description:
"Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn.",
},
next_speaker: {
type: Type.STRING,
enum: ['user', 'model'], // Enforce the choices
description:
'Who should speak next based *only* on the preceding turn and the decision rules',
},
},
required: ['reasoning', 'next_speaker'],
};
try {
// Use the new generateJson method, passing the history and the check prompt
const parsedResponse = await this.generateJson(
[
...history,
{
role: 'user',
parts: [{ text: checkPrompt }],
},
],
responseSchema,
);
// Safely extract the next speaker value
const nextSpeaker: string | undefined =
typeof parsedResponse?.next_speaker === 'string'
? parsedResponse.next_speaker
: undefined;
if (nextSpeaker === 'model') {
request = { text: 'alright' }; // Or potentially a more meaningful continuation prompt
} else {
// 'user' should speak next, or value is missing/invalid. End the turn.
break;
}
} catch (error) {
console.error(
`[Turn ${turns}] Failed to get or parse next speaker check:`,
error,
);
// If the check fails, assume user should speak next to avoid infinite loops
break;
}
}
if (turns >= this.MAX_TURNS) {
console.warn(
'sendMessageStream: Reached maximum tool call turns limit.',
);
yield {
type: GeminiEventType.Content,
value:
'\n\n[System Notice: Maximum interaction turns reached. The conversation may be incomplete.]',
};
}
} catch (error: unknown) {
// TODO(jbd): There is so much of packing/unpacking of error types.
// Figure out a way to remove the redundant work.
if (error instanceof Error && error.name === 'AbortError') {
console.log('Gemini stream request aborted by user.');
throw error;
} else {
console.error(`Error during Gemini stream or tool interaction:`, error);
const message = error instanceof Error ? error.message : String(error);
yield {
type: GeminiEventType.Content,
value: `\n\n[Error: An unexpected error occurred during the chat: ${message}]`,
};
throw error;
}
}
}
/**
* Generates structured JSON content based on conversational history and a schema.
* @param contents The conversational history (Content array) to provide context.
* @param schema The SchemaUnion defining the desired JSON structure.
* @returns A promise that resolves to the parsed JSON object matching the schema.
* @throws Throws an error if the API call fails or the response is not valid JSON.
*/
async generateJson(
contents: Content[],
schema: SchemaUnion,
): Promise<Record<string, unknown>> {
const model = this.config.getModel();
try {
const result = await this.ai.models.generateContent({
model,
config: {
...this.generateContentConfig,
systemInstruction: CoreSystemPrompt,
responseSchema: schema,
responseMimeType: 'application/json',
},
contents, // Pass the full Content array
});
const responseText = result.text;
if (!responseText) {
throw new Error('API returned an empty response.');
}
try {
const parsedJson = JSON.parse(responseText);
// TODO: Add schema validation if needed
return parsedJson;
} catch (parseError) {
console.error('Failed to parse JSON response:', responseText);
throw new Error(
`Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`,
);
}
} catch (error) {
console.error('Error generating JSON content:', error);
const message =
error instanceof Error ? error.message : 'Unknown API error.';
throw new Error(`Failed to generate JSON content: ${message}`);
}
}
}
+9 -174
View File
@@ -4,181 +4,16 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { ToolCallEvent, HistoryItem } from '../ui/types.js';
import { Part } from '@google/genai';
import {
handleToolCallChunk,
addErrorMessageToHistory,
} from './history-updater.js';
export enum GeminiEventType {
Content,
ToolCallInfo,
}
export interface GeminiContentEvent {
type: GeminiEventType.Content;
value: string;
}
export interface GeminiToolCallInfoEvent {
type: GeminiEventType.ToolCallInfo;
value: ToolCallEvent;
}
export type GeminiEvent = GeminiContentEvent | GeminiToolCallInfoEvent;
export type GeminiStream = AsyncIterable<GeminiEvent>;
// Only defining the state enum needed by the UI
export enum StreamingState {
Idle,
Responding,
Idle = 'idle',
Responding = 'responding',
WaitingForConfirmation = 'waiting_for_confirmation',
}
interface StreamProcessorParams {
stream: GeminiStream;
signal: AbortSignal;
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>;
submitQuery: (query: Part) => Promise<void>;
getNextMessageId: () => number;
addHistoryItem: (itemData: Omit<HistoryItem, 'id'>, id: number) => void;
currentToolGroupIdRef: React.MutableRefObject<number | null>;
// Copied from server/src/core/turn.ts for CLI usage
export enum GeminiEventType {
Content = 'content',
ToolCallRequest = 'tool_call_request',
// Add other event types if the UI hook needs to handle them
}
/**
* Processes the Gemini stream, managing text buffering, adaptive rendering,
* and delegating history updates for tool calls and errors.
*/
export const processGeminiStream = async ({
// Renamed function for clarity
stream,
signal,
setHistory,
submitQuery,
getNextMessageId,
addHistoryItem,
currentToolGroupIdRef,
}: StreamProcessorParams): Promise<void> => {
// --- State specific to this stream processing invocation ---
let textBuffer = '';
let renderTimeoutId: NodeJS.Timeout | null = null;
let isStreamComplete = false;
let currentGeminiMessageId: number | null = null;
const render = (content: string) => {
if (currentGeminiMessageId === null) {
return;
}
setHistory((prev) =>
prev.map((item) =>
item.id === currentGeminiMessageId && item.type === 'gemini'
? { ...item, text: (item.text ?? '') + content }
: item,
),
);
};
// --- Adaptive Rendering Logic (nested) ---
const renderBufferedText = () => {
if (signal.aborted) {
if (renderTimeoutId) clearTimeout(renderTimeoutId);
renderTimeoutId = null;
return;
}
const bufferLength = textBuffer.length;
let chunkSize = 0;
let delay = 50;
if (bufferLength > 150) {
chunkSize = Math.min(bufferLength, 30);
delay = 5;
} else if (bufferLength > 30) {
chunkSize = Math.min(bufferLength, 10);
delay = 10;
} else if (bufferLength > 0) {
chunkSize = 2;
delay = 20;
}
if (chunkSize > 0) {
const chunkToRender = textBuffer.substring(0, chunkSize);
textBuffer = textBuffer.substring(chunkSize);
render(chunkToRender);
renderTimeoutId = setTimeout(renderBufferedText, delay);
} else {
renderTimeoutId = null; // Clear timeout ID if nothing to render
if (!isStreamComplete) {
// Buffer empty, but stream might still send data, check again later
renderTimeoutId = setTimeout(renderBufferedText, 50);
}
}
};
const scheduleRender = () => {
if (renderTimeoutId === null) {
renderTimeoutId = setTimeout(renderBufferedText, 0);
}
};
// --- Stream Processing Loop ---
try {
for await (const chunk of stream) {
if (signal.aborted) break;
if (chunk.type === GeminiEventType.Content) {
currentToolGroupIdRef.current = null; // Reset tool group on text
if (currentGeminiMessageId === null) {
currentGeminiMessageId = getNextMessageId();
addHistoryItem({ type: 'gemini', text: '' }, currentGeminiMessageId);
textBuffer = '';
}
textBuffer += chunk.value;
scheduleRender();
} else if (chunk.type === GeminiEventType.ToolCallInfo) {
if (renderTimeoutId) {
// Stop rendering loop
clearTimeout(renderTimeoutId);
renderTimeoutId = null;
}
// Flush any text buffer content.
render(textBuffer);
currentGeminiMessageId = null; // End text message context
textBuffer = ''; // Clear buffer
// Delegate history update for tool call
handleToolCallChunk(
chunk.value,
setHistory,
submitQuery,
getNextMessageId,
currentToolGroupIdRef,
);
}
}
if (signal.aborted) {
throw new Error('Request cancelled by user');
}
} catch (error: unknown) {
if (renderTimeoutId) {
// Ensure render loop stops on error
clearTimeout(renderTimeoutId);
renderTimeoutId = null;
}
// Delegate history update for error message
addErrorMessageToHistory(
error as Error | DOMException,
setHistory,
getNextMessageId,
);
} finally {
isStreamComplete = true; // Signal stream end for render loop completion
if (renderTimeoutId) {
clearTimeout(renderTimeoutId);
renderTimeoutId = null;
}
renderBufferedText(); // Force final render
}
};
+2 -2
View File
@@ -54,9 +54,9 @@ export const handleToolCallChunk = (
handleToolCallChunk(
{
...chunk,
status: ToolCallStatus.Canceled,
status: ToolCallStatus.Error,
confirmationDetails: undefined,
resultDisplay,
resultDisplay: resultDisplay ?? 'Canceled by user.',
},
setHistory,
submitQuery,
-100
View File
@@ -1,100 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { ReadFileTool } from '../tools/read-file.tool.js';
import { TerminalTool } from '../tools/terminal.tool.js';
const MEMORY_FILE_NAME = 'GEMINI.md';
const contactEmail = 'ntaylormullen@google.com';
export const CoreSystemPrompt = `
You are an interactive CLI tool assistant specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
# Core Directives & Safety Rules
1. **Explain Critical Commands:** Before executing any command (especially using \`${TerminalTool.Name}\`) that modifies the file system, codebase, or system state, you *must* provide a brief explanation of the command's purpose and potential impact. Prioritize user understanding and safety.
2. **NEVER Commit Changes:** Unless explicitly instructed by the user to do so, you MUST NOT commit changes to version control (e.g., git commit). This is critical for user control over their repository.
3. **Security First:** Always apply security best practices. Never introduce code that exposes, logs, or commits secrets, API keys, or other sensitive information.
# Primary Workflow: Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this sequence:
1. **Understand:** Analyze the user's request and the relevant codebase context. Check for project-specific information in \`${MEMORY_FILE_NAME}\` if it exists. Use search tools extensively (in parallel if independent) to understand file structures, existing code patterns, and conventions.
2. **Implement:** Use the available tools (e.g., file editing, \`${TerminalTool.Name}\`) to construct the solution, strictly adhering to the project's established conventions (see 'Following Conventions' below).
- If creating a new project rely on scaffolding commands do lay out the initial project structure (i.e. npm init ...)
3. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining \`README\` files, \`${MEMORY_FILE_NAME}\`, build/package configuration (e.g., \`package.json\`), or existing test execution patterns. NEVER assume standard test commands.
4. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific linting and type-checking commands (e.g., \`npm run lint\`, \`ruff check .\`, \`tsc\`) that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, ask the user and propose adding them to \`${MEMORY_FILE_NAME}\` for future reference.
# Key Operating Principles
## Following Conventions
Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code and configuration first.
- **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, configuration files like \`package.json\`, \`Cargo.toml\`, \`requirements.txt\`, \`build.gradle\`, etc., or observe neighboring files) before employing it.
- **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project.
- **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically.
- **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add comments if necessary for clarity or if requested by the user.
## Memory (${MEMORY_FILE_NAME})
Utilize the \`${MEMORY_FILE_NAME}\` file in the current working directory for project-specific context:
- Reference stored commands, style preferences, and codebase notes when performing tasks.
- When you discover frequently used commands (build, test, lint, typecheck) or learn about specific project conventions or style preferences, proactively propose adding them to \`${MEMORY_FILE_NAME}\` for future sessions.
## Tone and Style (CLI Interaction)
- **Concise & Direct:** Adopt a professional, direct, and concise tone suitable for a CLI environment.
- **Minimal Output:** Aim for fewer than 4 lines of text output (excluding tool use/code generation) per response whenever practical. Focus strictly on the user's query.
- **Clarity over Brevity (When Needed):** While conciseness is key, prioritize clarity for essential explanations (like pre-command warnings) or when seeking necessary clarification if a request is ambiguous.
- **No Chitchat:** Avoid conversational filler, preambles ("Okay, I will now..."), or postambles ("I have finished the changes..."). Get straight to the action or answer.
- **Formatting:** Use GitHub-flavored Markdown. Responses will be rendered in monospace.
- **Tools vs. Text:** Use tools for actions, text output *only* for communication. Do not add explanatory comments within tool calls or code blocks unless specifically part of the required code/command itself.
- **Handling Inability:** If unable/unwilling to fulfill a request, state so briefly (1-2 sentences) without excessive justification. Offer alternatives if appropriate.
## Proactiveness
- **Act within Scope:** Fulfill the user's request thoroughly, including reasonable, directly implied follow-up actions.
- **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it.
- **Stop After Action:** After completing a code modification or file operation, simply stop. Do not provide summaries unless asked.
# Tool Usage
- **Search:** Prefer the Agent tool for file searching to optimize context usage.
- **Parallelism:** Execute multiple independent tool calls in parallel when feasible.
- **Command Execution:** Use the \`${TerminalTool.Name}\` tool for running shell commands, remembering the safety rule to explain modifying commands first.
# Interaction Details
- **Help Command:** Use \`/help\` to display Gemini Code help. To get specific command/flag info, execute \`gemini -h\` via \`${TerminalTool.Name}\` and show the output.
- **Synthetic Messages:** Ignore system messages like \`++Request Cancelled++\`. Do not generate them.
- **Feedback:** Direct feedback to ${contactEmail}.
# Examples (Illustrating Tone and Workflow)
<example>
user: 1 + 2
assistant: 3
</example>
<example>
user: is 13 a prime number?
assistant: true
</example>
<example>
user: List files here.
assistant: [tool_call: execute_bash_command for 'ls -la']))]
</example>
<example>
user: Refactor the auth logic in src/auth.py to use the 'requests' library.
assistant: Okay, I see src/auth.py currently uses 'urllib'. Before changing it, I need to check if 'requests' is already a project dependency. [tool_call: ${TerminalTool.Name} for grep 'requests', 'requirements.txt']
(After confirming dependency or asking user to add it)
Okay, 'requests' is available. I will now refactor src/auth.py.
[tool_call: Uses read, edit tools following conventions]
(After editing)
[tool_call: Runs project-specific lint/typecheck commands found previously, e.g., ${TerminalTool.Name} for 'ruff', 'check', 'src/auth.py']
</example>
<example>
user: Delete the temp directory.
assistant: I can run \`rm -rf ./temp\`. This will permanently delete the directory and all its contents. Is it okay to proceed?
</example>
# Final Reminder
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions on the contents of files; instead use the ${ReadFileTool.Name} to ensure you aren't making too broad of assumptions.
`;
-233
View File
@@ -1,233 +0,0 @@
import {
Part,
Chat,
PartListUnion,
GenerateContentResponse,
FunctionCall,
} from '@google/genai';
import {
type ToolCallConfirmationDetails,
ToolCallStatus,
ToolCallEvent,
} from '../ui/types.js';
import { ToolResult } from '../tools/tools.js';
import { toolRegistry } from '../tools/tool-registry.js';
import { GeminiEventType, GeminiStream } from './gemini-stream.js';
export type ToolExecutionOutcome = {
callId: string;
name: string;
args: Record<string, never>;
result?: ToolResult;
error?: Error;
confirmationDetails?: ToolCallConfirmationDetails;
};
// TODO(jbd): Move ToolExecutionOutcome to somewhere else?
/**
 * Manages a single turn of the agentic loop.
 *
 * `Turn.run` streams events (content chunks and tool-call notifications)
 * back to the caller so they can be surfaced to the user as immediate
 * feedback while the model response is still in flight.
 */
export class Turn {
  private readonly chat: Chat;
  // Tool calls announced by the model that have not yet been executed.
  private pendingToolCalls: Array<{
    callId: string;
    name: string;
    args: Record<string, never>;
  }>;
  // Function responses buffered for the caller to send back to the model.
  private fnResponses: Part[];
  // Raw API responses retained for debugging/inspection.
  private debugResponses: GenerateContentResponse[];

  constructor(chat: Chat) {
    this.chat = chat;
    this.pendingToolCalls = [];
    this.fnResponses = [];
    this.debugResponses = [];
  }

  /**
   * Sends `req` to the model and yields stream events as they arrive.
   *
   * Text chunks are yielded as content events. Function calls are first
   * registered as pending (yielding a pending tool-call event), then
   * executed — or flagged for confirmation — and their outcomes yielded
   * as tool-call events. Function responses are buffered for retrieval
   * via {@link getFunctionResponses}.
   *
   * @param req Message parts to send for this turn.
   * @param signal Optional abort signal, checked between streamed chunks.
   * @throws An `AbortError` when `signal` is aborted mid-stream.
   */
  async *run(req: PartListUnion, signal?: AbortSignal): GeminiStream {
    const responseStream = await this.chat.sendMessageStream({
      message: req,
    });
    for await (const resp of responseStream) {
      this.debugResponses.push(resp);
      if (signal?.aborted) {
        throw this.abortError();
      }
      if (resp.text) {
        yield {
          type: GeminiEventType.Content,
          value: resp.text,
        };
        // NOTE(review): a chunk carrying both text and functionCalls would
        // have its function calls skipped by this `continue` — confirm the
        // API never mixes the two in a single response chunk.
        continue;
      }
      if (!resp.functionCalls) {
        continue;
      }
      for (const fnCall of resp.functionCalls) {
        for await (const event of this.handlePendingFunctionCall(fnCall)) {
          yield event;
        }
      }
      // Create promises to be able to wait for executions to complete.
      const toolPromises = this.pendingToolCalls.map(
        async (pendingToolCall) => {
          const tool = toolRegistry.getTool(pendingToolCall.name);
          if (!tool) {
            return {
              ...pendingToolCall,
              error: new Error(
                `Tool "${pendingToolCall.name}" not found or is not registered.`,
              ),
            };
          }
          const shouldConfirm = await tool.shouldConfirmExecute(
            pendingToolCall.args,
          );
          if (shouldConfirm) {
            return {
              // TODO(jbd): Should confirm isn't confirmation details.
              ...pendingToolCall,
              confirmationDetails: shouldConfirm,
            };
          }
          const result = await tool.execute(pendingToolCall.args);
          return { ...pendingToolCall, result };
        },
      );
      const outcomes = await Promise.all(toolPromises);
      for await (const event of this.handleToolOutcomes(outcomes)) {
        yield event;
      }
      this.pendingToolCalls = [];
      // TODO(jbd): Make it harder for the caller to ignore the
      // buffered function responses.
      // NOTE(review): this overwrites (rather than appends to) fnResponses
      // on every chunk that carried function calls — confirm a turn never
      // spreads tool calls across multiple chunks.
      this.fnResponses = this.buildFunctionResponses(outcomes);
    }
  }

  /**
   * Registers a model-announced function call as pending and yields a
   * "pending" tool-call event so the UI can show immediate feedback.
   * Synthesizes a callId when the API did not provide one.
   */
  private async *handlePendingFunctionCall(fnCall: FunctionCall): GeminiStream {
    const callId =
      fnCall.id ??
      `${fnCall.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
    // TODO(jbd): replace with uuid.
    const name = fnCall.name || 'undefined_tool_name';
    const args = (fnCall.args || {}) as Record<string, never>;
    this.pendingToolCalls.push({ callId, name, args });
    const value: ToolCallEvent = {
      type: 'tool_call',
      status: ToolCallStatus.Pending,
      callId,
      name,
      args,
      resultDisplay: undefined,
      confirmationDetails: undefined,
    };
    yield {
      type: GeminiEventType.ToolCallInfo,
      value,
    };
  }

  /**
   * Yields a tool-call event for each execution outcome.
   *
   * Errors (either an invocation `error` or an `error` key inside the
   * tool's result object) are surfaced as content events instead.
   */
  private async *handleToolOutcomes(
    outcomes: ToolExecutionOutcome[],
  ): GeminiStream {
    for (const outcome of outcomes) {
      const { callId, name, args, result, error, confirmationDetails } =
        outcome;
      if (error) {
        // TODO(jbd): Error handling needs a cleanup.
        const errorMessage = error?.message || String(error);
        yield {
          type: GeminiEventType.Content,
          value: `[Error invoking tool ${name}: ${errorMessage}]`,
        };
        // NOTE(review): returning here drops events for all remaining
        // outcomes — confirm this is intentional rather than `continue`.
        return;
      }
      if (
        result &&
        typeof result === 'object' &&
        result !== null &&
        'error' in result
      ) {
        const errorMessage = String(result.error);
        yield {
          type: GeminiEventType.Content,
          value: `[Error executing tool ${name}: ${errorMessage}]`,
        };
        // NOTE(review): same early-return concern as above.
        return;
      }
      // A present confirmationDetails means the tool was not executed yet
      // and is awaiting user confirmation.
      const status = confirmationDetails
        ? ToolCallStatus.Confirming
        : ToolCallStatus.Invoked;
      const value: ToolCallEvent = {
        type: 'tool_call',
        status,
        callId,
        name,
        args,
        resultDisplay: result?.returnDisplay,
        confirmationDetails,
      };
      yield {
        type: GeminiEventType.ToolCallInfo,
        value,
      };
    }
  }

  /**
   * Converts execution outcomes into functionResponse parts suitable for
   * sending back to the model. Invocation failures become an error payload;
   * tool-reported error structures are passed through as regular output
   * (with a warning logged).
   */
  private buildFunctionResponses(outcomes: ToolExecutionOutcome[]): Part[] {
    return outcomes.map((outcome: ToolExecutionOutcome): Part => {
      const { name, result, error } = outcome;
      const output = { output: result?.llmContent };
      let fnResponse: Record<string, unknown>;
      if (error) {
        const errorMessage = error?.message || String(error);
        fnResponse = {
          error: `Invocation failed: ${errorMessage}`,
        };
        console.error(`[Turn] Critical error invoking tool ${name}:`, error);
      } else if (
        result &&
        typeof result === 'object' &&
        result !== null &&
        'error' in result
      ) {
        // Tool returned an error structure; forward its llmContent anyway.
        fnResponse = output;
        console.warn(
          `[Turn] Tool ${name} returned an error structure:`,
          result.error,
        );
      } else {
        fnResponse = output;
      }
      return {
        functionResponse: {
          name,
          id: outcome.callId,
          response: fnResponse,
        },
      };
    });
  }

  /**
   * Builds the `AbortError` raised when the caller cancels mid-stream.
   *
   * Fixed: this method previously threw the error itself despite being
   * declared to return one, which made the `throw this.abortError()` call
   * site misleading. It now returns the error as its signature promises;
   * observable behavior at the call site is unchanged.
   */
  private abortError(): Error {
    // TODO(jbd): Move it out of this class.
    const error = new Error('Request cancelled by user during stream.');
    error.name = 'AbortError';
    return error;
  }

  /** Returns the function responses buffered by the most recent run. */
  getFunctionResponses(): Part[] {
    return this.fnResponses;
  }

  /** Returns every raw API response seen during this turn (for debugging). */
  getDebugResponses(): GenerateContentResponse[] {
    return this.debugResponses;
  }
}
+10 -10
View File
@@ -16,20 +16,19 @@ import { EditTool } from './tools/edit.tool.js';
import { TerminalTool } from './tools/terminal.tool.js';
import { WriteFileTool } from './tools/write-file.tool.js';
import { WebFetchTool } from './tools/web-fetch.tool.js';
import { globalConfig } from './config/config.js';
// TODO(b/411707095): remove. left here as an example of how to pull in inter-package deps
import { helloServer } from '@gemini-code/server';
helloServer();
import { loadCliConfig } from './config/config.js';
async function main() {
// Configure tools
registerTools(globalConfig.getTargetDir());
// Load configuration
const config = loadCliConfig();
// Render UI
// Configure tools using the loaded config
registerTools(config.getTargetDir());
// Render UI, passing necessary config values
render(
React.createElement(App, {
directory: globalConfig.getTargetDir(),
config,
}),
);
}
@@ -81,12 +80,13 @@ main().catch((error) => {
});
function registerTools(targetDir: string) {
const config = loadCliConfig();
const lsTool = new LSTool(targetDir);
const readFileTool = new ReadFileTool(targetDir);
const grepTool = new GrepTool(targetDir);
const globTool = new GlobTool(targetDir);
const editTool = new EditTool(targetDir);
const terminalTool = new TerminalTool(targetDir);
const terminalTool = new TerminalTool(targetDir, config);
const writeFileTool = new WriteFileTool(targetDir);
const webFetchTool = new WebFetchTool();
+71 -331
View File
@@ -6,233 +6,63 @@
import fs from 'fs';
import path from 'path';
import * as Diff from 'diff';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { BaseTool, ToolResult } from './tools.js';
import {
EditLogic,
EditToolParams,
ToolResult,
makeRelative,
shortenPath,
isNodeError,
} from '@gemini-code/server';
import { BaseTool } from './tools.js';
import {
ToolCallConfirmationDetails,
ToolConfirmationOutcome,
ToolEditConfirmationDetails,
} from '../ui/types.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { ReadFileTool } from './read-file.tool.js';
import { WriteFileTool } from './write-file.tool.js';
import { isNodeError } from '../utils/errors.js';
import * as Diff from 'diff';
/**
* Parameters for the Edit tool
*/
export interface EditToolParams {
/**
* The absolute path to the file to modify
*/
file_path: string;
/**
* The text to replace
*/
old_string: string;
/**
* The text to replace it with
*/
new_string: string;
/**
* The expected number of replacements to perform (optional, defaults to 1)
*/
expected_replacements?: number;
}
interface CalculatedEdit {
currentContent: string | null;
newContent: string;
occurrences: number;
error?: { display: string; raw: string };
isNewFile: boolean;
}
/**
* Implementation of the Edit tool that modifies files.
* This tool maintains state for the "Always Edit" confirmation preference.
* CLI wrapper for the Edit tool.
* Handles confirmation prompts and potentially UI-specific state like 'Always Edit'.
*/
export class EditTool extends BaseTool<EditToolParams, ToolResult> {
static readonly Name: string = EditLogic.Name;
private coreLogic: EditLogic;
private shouldAlwaysEdit = false;
private readonly rootDirectory: string;
/**
* Creates a new instance of the EditTool
* Creates a new instance of the EditTool CLI wrapper
* @param rootDirectory Root directory to ground this tool in.
*/
constructor(rootDirectory: string) {
const coreLogicInstance = new EditLogic(rootDirectory);
super(
'replace',
EditTool.Name,
'Edit',
`Replaces a SINGLE, UNIQUE occurrence of text within a file. Requires providing significant context around the change to ensure uniqueness. For moving/renaming files, use the Bash tool with \`mv\`. For replacing entire file contents or creating new files use the ${WriteFileTool.Name} tool. Always use the ${ReadFileTool.Name} tool to examine the file before using this tool.`,
{
properties: {
file_path: {
description:
'The absolute path to the file to modify. Must start with /. When creating a new file, ensure the parent directory exists (use the `LS` tool to verify).',
type: 'string',
},
old_string: {
description:
'The exact text to replace. CRITICAL: Must uniquely identify the single instance to change. Include at least 3-5 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations or does not match exactly, the tool will fail. Use an empty string ("") when creating a new file.',
type: 'string',
},
new_string: {
description:
'The text to replace the `old_string` with. When creating a new file (using an empty `old_string`), this should contain the full desired content of the new file. Ensure the resulting code is correct and idiomatic.',
type: 'string',
},
},
required: ['file_path', 'old_string', 'new_string'],
type: 'object',
},
`Replaces a SINGLE, UNIQUE occurrence of text within a file. Requires providing significant context around the change to ensure uniqueness. For moving/renaming files, use the Bash tool with \`mv\`. For replacing entire file contents or creating new files use the WriteFile tool. Always use the ReadFile tool to examine the file before using this tool.`,
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
this.rootDirectory = path.resolve(rootDirectory);
this.coreLogic = coreLogicInstance;
}
/**
* Checks if a path is within the root directory.
* @param pathToCheck The absolute path to check.
* @returns True if the path is within the root directory, false otherwise.
* Delegates validation to the core logic
*/
private isWithinRoot(pathToCheck: string): boolean {
const normalizedPath = path.normalize(pathToCheck);
const normalizedRoot = this.rootDirectory;
const rootWithSep = normalizedRoot.endsWith(path.sep)
? normalizedRoot
: normalizedRoot + path.sep;
return (
normalizedPath === normalizedRoot ||
normalizedPath.startsWith(rootWithSep)
);
validateToolParams(params: EditToolParams): string | null {
return this.coreLogic.validateParams(params);
}
/**
* Validates the parameters for the Edit tool
* @param params Parameters to validate
* @returns True if parameters are valid, false otherwise
* Delegates getting description to the core logic
*/
validateParams(params: EditToolParams): boolean {
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return false;
}
// Ensure path is absolute
if (!path.isAbsolute(params.file_path)) {
console.error(`File path must be absolute: ${params.file_path}`);
return false;
}
// Ensure path is within the root directory
if (!this.isWithinRoot(params.file_path)) {
console.error(
`File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`,
);
return false;
}
// Validate expected_replacements if provided
if (
params.expected_replacements !== undefined &&
params.expected_replacements < 0
) {
console.error('Expected replacements must be a non-negative number');
return false;
}
return true;
getDescription(params: EditToolParams): string {
return this.coreLogic.getDescription(params);
}
/**
* Calculates the potential outcome of an edit operation.
* @param params Parameters for the edit operation
* @returns An object describing the potential edit outcome
* @throws File system errors if reading the file fails unexpectedly (e.g., permissions)
*/
private calculateEdit(params: EditToolParams): CalculatedEdit {
const expectedReplacements =
params.expected_replacements === undefined
? 1
: params.expected_replacements;
let currentContent: string | null = null;
let fileExists = false;
let isNewFile = false;
let newContent = '';
let occurrences = 0;
let error: { display: string; raw: string } | undefined = undefined;
try {
currentContent = fs.readFileSync(params.file_path, 'utf8');
fileExists = true;
} catch (err: unknown) {
if (!isNodeError(err) || err.code !== 'ENOENT') {
throw err;
}
fileExists = false;
}
if (params.old_string === '' && !fileExists) {
isNewFile = true;
newContent = params.new_string;
occurrences = 0;
} else if (!fileExists) {
error = {
display: `File not found.`,
raw: `File not found: ${params.file_path}`,
};
} else if (currentContent !== null) {
occurrences = this.countOccurrences(currentContent, params.old_string);
if (occurrences === 0) {
error = {
display: `No edits made`,
raw: `Failed to edit, 0 occurrences found`,
};
} else if (occurrences !== expectedReplacements) {
error = {
display: `Failed to edit, expected ${expectedReplacements} occurrences but found ${occurrences}`,
raw: `Failed to edit, Expected ${expectedReplacements} occurrences but found ${occurrences} in file: ${params.file_path}`,
};
} else {
newContent = this.replaceAll(
currentContent,
params.old_string,
params.new_string,
);
}
} else {
error = {
display: `Failed to read content`,
raw: `Failed to read content of existing file: ${params.file_path}`,
};
}
return {
currentContent,
newContent,
occurrences,
error,
isNewFile,
};
}
/**
* Determines if confirmation is needed and prepares the confirmation details.
* This method performs the calculation needed to generate the diff and respects the `shouldAlwaysEdit` state.
* @param params Parameters for the potential edit operation
* @returns Confirmation details object or false if no confirmation is needed/possible.
* Handles the confirmation prompt for the Edit tool in the CLI.
* It needs to calculate the diff to show the user.
*/
async shouldConfirmExecute(
params: EditToolParams,
@@ -240,40 +70,62 @@ export class EditTool extends BaseTool<EditToolParams, ToolResult> {
if (this.shouldAlwaysEdit) {
return false;
}
if (!this.validateParams(params)) {
const validationError = this.validateToolParams(params);
if (validationError) {
console.error(
'[EditTool] Attempted confirmation with invalid parameters.',
`[EditTool Wrapper] Attempted confirmation with invalid parameters: ${validationError}`,
);
return false;
}
let calculatedEdit: CalculatedEdit;
let currentContent: string | null = null;
let fileExists = false;
let newContent = '';
try {
calculatedEdit = this.calculateEdit(params);
} catch (error) {
console.error(
`Error calculating edit for confirmation: ${error instanceof Error ? error.message : String(error)}`,
currentContent = fs.readFileSync(params.file_path, 'utf8');
fileExists = true;
} catch (err: unknown) {
if (isNodeError(err) && err.code === 'ENOENT') {
fileExists = false;
} else {
console.error(`Error reading file for confirmation diff: ${err}`);
return false;
}
}
if (params.old_string === '' && !fileExists) {
newContent = params.new_string;
} else if (!fileExists) {
return false;
} else if (currentContent !== null) {
const occurrences = this.coreLogic['countOccurrences'](
currentContent,
params.old_string,
);
const expectedReplacements =
params.expected_replacements === undefined
? 1
: params.expected_replacements;
if (occurrences === 0 || occurrences !== expectedReplacements) {
return false;
}
newContent = this.coreLogic['replaceAll'](
currentContent,
params.old_string,
params.new_string,
);
} else {
return false;
}
if (calculatedEdit.error) {
return false;
}
const fileName = path.basename(params.file_path);
const fileDiff = Diff.createPatch(
fileName,
calculatedEdit.currentContent ?? '',
calculatedEdit.newContent,
currentContent ?? '',
newContent,
'Current',
'Proposed',
{ context: 3, ignoreWhitespace: true },
{ context: 3 },
);
const confirmationDetails: ToolEditConfirmationDetails = {
title: `Confirm Edit: ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
title: `Confirm Edit: ${shortenPath(makeRelative(params.file_path, this.coreLogic['rootDirectory']))}`,
fileName,
fileDiff,
onConfirm: async (outcome: ToolConfirmationOutcome) => {
@@ -285,122 +137,10 @@ export class EditTool extends BaseTool<EditToolParams, ToolResult> {
return confirmationDetails;
}
getDescription(params: EditToolParams): string {
const relativePath = makeRelative(params.file_path, this.rootDirectory);
const oldStringSnippet =
params.old_string.split('\n')[0].substring(0, 30) +
(params.old_string.length > 30 ? '...' : '');
const newStringSnippet =
params.new_string.split('\n')[0].substring(0, 30) +
(params.new_string.length > 30 ? '...' : '');
return `${shortenPath(relativePath)}: ${oldStringSnippet} => ${newStringSnippet}`;
}
/**
* Executes the edit operation with the given parameters.
* This method recalculates the edit operation before execution.
* @param params Parameters for the edit operation
* @returns Result of the edit operation
* Delegates execution to the core logic
*/
async execute(params: EditToolParams): Promise<ToolResult> {
if (!this.validateParams(params)) {
return {
llmContent: 'Invalid parameters for file edit operation',
returnDisplay: '**Error:** Invalid parameters for file edit operation',
};
}
let editData: CalculatedEdit;
try {
editData = this.calculateEdit(params);
} catch (error) {
return {
llmContent: `Error preparing edit: ${error instanceof Error ? error.message : String(error)}`,
returnDisplay: 'Failed to prepare edit',
};
}
if (editData.error) {
return {
llmContent: editData.error.raw,
returnDisplay: editData.error.display,
};
}
try {
this.ensureParentDirectoriesExist(params.file_path);
fs.writeFileSync(params.file_path, editData.newContent, 'utf8');
if (editData.isNewFile) {
return {
llmContent: `Created new file: ${params.file_path} with provided content.`,
returnDisplay: `Created ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
};
} else {
const fileName = path.basename(params.file_path);
const fileDiff = Diff.createPatch(
fileName,
editData.currentContent ?? '',
editData.newContent,
'Current',
'Proposed',
{ context: 3, ignoreWhitespace: true },
);
return {
llmContent: `Successfully modified file: ${params.file_path} (${editData.occurrences} replacements).`,
returnDisplay: { fileDiff },
};
}
} catch (error) {
return {
llmContent: `Error executing edit: ${error instanceof Error ? error.message : String(error)}`,
returnDisplay: `Failed to edit file`,
};
}
}
/**
* Counts occurrences of a substring in a string
* @param str String to search in
* @param substr Substring to count
* @returns Number of occurrences
*/
private countOccurrences(str: string, substr: string): number {
if (substr === '') {
return 0;
}
let count = 0;
let pos = str.indexOf(substr);
while (pos !== -1) {
count++;
pos = str.indexOf(substr, pos + substr.length);
}
return count;
}
/**
* Replaces all occurrences of a substring in a string
* @param str String to modify
* @param find Substring to find
* @param replace Replacement string
* @returns Modified string
*/
private replaceAll(str: string, find: string, replace: string): string {
if (find === '') {
return str;
}
return str.split(find).join(replace);
}
/**
* Creates parent directories if they don't exist
* @param filePath Path to ensure parent directories exist
*/
private ensureParentDirectoriesExist(filePath: string): void {
const dirName = path.dirname(filePath);
if (!fs.existsSync(dirName)) {
fs.mkdirSync(dirName, { recursive: true });
}
return this.coreLogic.execute(params);
}
}
+41 -216
View File
@@ -4,247 +4,72 @@
* SPDX-License-Identifier: Apache-2.0
*/
import fs from 'fs';
import path from 'path';
import fg from 'fast-glob';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { BaseTool, ToolResult } from './tools.js';
import { shortenPath, makeRelative } from '../utils/paths.js';
// Import core logic and types from the server package
import { GlobLogic, GlobToolParams, ToolResult } from '@gemini-code/server';
// Import CLI-specific base class and types
import { BaseTool } from './tools.js';
import { ToolCallConfirmationDetails } from '../ui/types.js';
/**
* Parameters for the GlobTool
*/
export interface GlobToolParams {
/**
* The glob pattern to match files against
*/
pattern: string;
/**
* The directory to search in (optional, defaults to current directory)
*/
path?: string;
}
/**
* Implementation of the GlobTool that finds files matching patterns,
* sorted by modification time (newest first).
* CLI wrapper for the Glob tool
*/
export class GlobTool extends BaseTool<GlobToolParams, ToolResult> {
/**
* The root directory that this tool is grounded in.
* All file operations will be restricted to this directory.
*/
private rootDirectory: string;
static readonly Name: string = GlobLogic.Name; // Use name from logic
// Core logic instance from the server package
private coreLogic: GlobLogic;
/**
* Creates a new instance of the GlobTool
* @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
* Creates a new instance of the GlobTool CLI wrapper
* @param rootDirectory Root directory to ground this tool in.
*/
constructor(rootDirectory: string) {
// Instantiate the core logic from the server package
const coreLogicInstance = new GlobLogic(rootDirectory);
// Initialize the CLI BaseTool
super(
'glob',
'FindFiles',
'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). Ideal for quickly locating files based on their name or path structure, especially in large codebases.',
{
properties: {
pattern: {
description:
"The glob pattern to match against (e.g., '*.py', 'src/**/*.js', 'docs/*.md').",
type: 'string',
},
path: {
description:
'Optional: The absolute path to the directory to search within. If omitted, searches the root directory.',
type: 'string',
},
},
required: ['pattern'],
type: 'object',
},
GlobTool.Name,
'FindFiles', // Define display name here
'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). Ideal for quickly locating files based on their name or path structure, especially in large codebases.', // Define description here
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
// Set the root directory
this.rootDirectory = path.resolve(rootDirectory);
this.coreLogic = coreLogicInstance;
}
/**
* Checks if a path is within the root directory.
* This is a security measure to prevent the tool from accessing files outside of its designated root.
* @param pathToCheck The path to check (expects an absolute path)
* @returns True if the path is within the root directory, false otherwise
*/
private isWithinRoot(pathToCheck: string): boolean {
const absolutePathToCheck = path.resolve(pathToCheck);
const normalizedPath = path.normalize(absolutePathToCheck);
const normalizedRoot = path.normalize(this.rootDirectory);
// Ensure the normalizedRoot ends with a path separator for proper prefix comparison
const rootWithSep = normalizedRoot.endsWith(path.sep)
? normalizedRoot
: normalizedRoot + path.sep;
// Check if it's the root itself or starts with the root path followed by a separator.
// This ensures that we don't accidentally allow access to parent directories.
return (
normalizedPath === normalizedRoot ||
normalizedPath.startsWith(rootWithSep)
);
}
/**
* Validates the parameters for the tool.
* Ensures that the provided parameters adhere to the expected schema and that the search path is valid and within the tool's root directory.
* @param params Parameters to validate
* @returns An error message string if invalid, null otherwise
* Delegates validation to the core logic
*/
validateToolParams(params: GlobToolParams): string | null {
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return "Parameters failed schema validation. Ensure 'pattern' is a string and 'path' (if provided) is a string.";
}
// Determine the absolute path to check
const searchDirAbsolute = params.path ?? this.rootDirectory;
// Validate path is within root directory
if (!this.isWithinRoot(searchDirAbsolute)) {
return `Search path ("${searchDirAbsolute}") resolves outside the tool's root directory ("${this.rootDirectory}").`;
}
// Validate path exists and is a directory using the absolute path.
// These checks prevent the tool from attempting to search in non-existent or non-directory paths, which would lead to errors.
try {
if (!fs.existsSync(searchDirAbsolute)) {
return `Search path does not exist: ${shortenPath(makeRelative(searchDirAbsolute, this.rootDirectory))} (absolute: ${searchDirAbsolute})`;
}
if (!fs.statSync(searchDirAbsolute).isDirectory()) {
return `Search path is not a directory: ${shortenPath(makeRelative(searchDirAbsolute, this.rootDirectory))} (absolute: ${searchDirAbsolute})`;
}
} catch (e: unknown) {
// Catch potential permission errors during sync checks
return `Error accessing search path: ${e}`;
}
// Validate glob pattern (basic non-empty check)
if (
!params.pattern ||
typeof params.pattern !== 'string' ||
params.pattern.trim() === ''
) {
return "The 'pattern' parameter cannot be empty.";
}
// Could add more sophisticated glob pattern validation if needed
return null; // Parameters are valid
return this.coreLogic.validateToolParams(params);
}
/**
* Gets a description of the glob operation.
* @param params Parameters for the glob operation.
* @returns A string describing the glob operation.
* Delegates getting description to the core logic
*/
getDescription(params: GlobToolParams): string {
let description = `'${params.pattern}'`;
if (params.path) {
const searchDir = params.path || this.rootDirectory;
const relativePath = makeRelative(searchDir, this.rootDirectory);
description += ` within ${shortenPath(relativePath)}`;
}
return description;
return this.coreLogic.getDescription(params);
}
/**
* Executes the glob search with the given parameters
* @param params Parameters for the glob search
* @returns Result of the glob search
* Define confirmation behavior (Glob likely doesn't need confirmation)
*/
shouldConfirmExecute(
// eslint-disable-next-line @typescript-eslint/no-unused-vars
params: GlobToolParams,
): Promise<ToolCallConfirmationDetails | false> {
return Promise.resolve(false);
}
/**
* Delegates execution to the core logic
*/
async execute(params: GlobToolParams): Promise<ToolResult> {
const validationError = this.validateToolParams(params);
if (validationError) {
return {
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
returnDisplay: `**Error:** Failed to execute tool.`,
};
}
try {
// 1. Resolve the absolute search directory. Validation ensures it exists and is a directory.
const searchDirAbsolute = params.path ?? this.rootDirectory;
// 2. Perform Glob Search using fast-glob
// We use fast-glob because it's performant and supports glob patterns.
const entries = await fg(params.pattern, {
cwd: searchDirAbsolute, // Search within this absolute directory
absolute: true, // Return absolute paths
onlyFiles: true, // Match only files
stats: true, // Include file stats object for sorting
dot: true, // Include files starting with a dot
ignore: ['**/node_modules/**', '**/.git/**'], // Common sensible default, adjust as needed
followSymbolicLinks: false, // Avoid potential issues with symlinks unless specifically needed
suppressErrors: true, // Suppress EACCES errors for individual files (we handle dir access in validation)
});
// 3. Handle No Results
if (!entries || entries.length === 0) {
return {
llmContent: `No files found matching pattern "${params.pattern}" within ${searchDirAbsolute}.`,
returnDisplay: `No files found`,
};
}
// 4. Sort Results by Modification Time (Newest First)
// Sorting by modification time ensures that the most recently modified files are listed first.
// This can be useful for quickly identifying the files that have been recently changed.
// The stats object is guaranteed by the `stats: true` option in the fast-glob configuration.
entries.sort((a, b) => {
// Ensure stats exist before accessing mtime (though fg should provide them)
const mtimeA = a.stats?.mtime?.getTime() ?? 0;
const mtimeB = b.stats?.mtime?.getTime() ?? 0;
return mtimeB - mtimeA; // Descending order
});
// 5. Format Output
const sortedAbsolutePaths = entries.map((entry) => entry.path);
// Convert absolute paths to relative paths (to rootDir) for clearer display
const sortedRelativePaths = sortedAbsolutePaths.map((absPath) =>
makeRelative(absPath, this.rootDirectory),
);
// Construct the result message
const fileListDescription = sortedRelativePaths
.map((p) => ` - ${shortenPath(p)}`)
.join('\n');
const fileCount = sortedRelativePaths.length;
const relativeSearchDir = makeRelative(
searchDirAbsolute,
this.rootDirectory,
);
const displayPath = shortenPath(
relativeSearchDir === '.' ? 'root directory' : relativeSearchDir,
);
return {
llmContent: `Found ${fileCount} file(s) matching "${params.pattern}" within ${displayPath}, sorted by modification time (newest first):\n${fileListDescription}`,
returnDisplay: `Found ${fileCount} matching file(s)`,
};
} catch (error) {
// Catch unexpected errors during glob execution (less likely with suppressErrors=true, but possible)
const errorMessage =
error instanceof Error ? error.message : String(error);
console.error(`GlobTool execute Error: ${errorMessage}`, error);
return {
llmContent: `Error during glob search operation: ${errorMessage}`,
returnDisplay: `**Error:** An unexpected error occurred.`,
};
}
return this.coreLogic.execute(params);
}
// Removed private methods (isWithinRoot)
// as they are now part of GlobLogic in the server package.
}
+45 -547
View File
@@ -4,578 +4,76 @@
* SPDX-License-Identifier: Apache-2.0
*/
import fs from 'fs'; // Used for sync checks in validation
import fsPromises from 'fs/promises'; // Used for async operations in fallback
import path from 'path';
import { EOL } from 'os'; // Used for parsing grep output lines
import { spawn } from 'child_process'; // Used for git grep and system grep
import fastGlob from 'fast-glob'; // Used for JS fallback file searching
import { BaseTool, ToolResult } from './tools.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { getErrorMessage, isNodeError } from '../utils/errors.js';
// Import core logic and types from the server package
import { GrepLogic, GrepToolParams, ToolResult } from '@gemini-code/server';
// --- Interfaces (kept separate for clarity) ---
// Import CLI-specific base class and types
import { BaseTool } from './tools.js';
import { ToolCallConfirmationDetails } from '../ui/types.js';
// --- Interfaces (Params defined in server package) ---
// --- GrepTool CLI Wrapper Class ---
/**
* Parameters for the GrepTool
*/
export interface GrepToolParams {
/**
* The regular expression pattern to search for in file contents
*/
pattern: string;
/**
* The directory to search in (optional, defaults to current directory relative to root)
*/
path?: string;
/**
* File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")
*/
include?: string;
}
/**
* Result object for a single grep match
*/
interface GrepMatch {
filePath: string;
lineNumber: number;
line: string;
}
// --- GrepTool Class ---
/**
* Implementation of the GrepTool that searches file contents using git grep, system grep, or JS fallback.
* CLI wrapper for the Grep tool
*/
export class GrepTool extends BaseTool<GrepToolParams, ToolResult> {
private rootDirectory: string;
static readonly Name: string = GrepLogic.Name; // Use name from logic
// Core logic instance from the server package
private coreLogic: GrepLogic;
/**
* Creates a new instance of the GrepTool
* @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
* Creates a new instance of the GrepTool CLI wrapper
* @param rootDirectory Root directory to ground this tool in.
*/
constructor(rootDirectory: string) {
// Instantiate the core logic from the server package
const coreLogicInstance = new GrepLogic(rootDirectory);
// Initialize the CLI BaseTool
super(
'search_file_content',
'SearchText',
'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.',
{
properties: {
pattern: {
description:
"The regular expression (regex) pattern to search for within file contents (e.g., 'function\\s+myFunction', 'import\\s+\\{.*\\}\\s+from\\s+.*').",
type: 'string',
},
path: {
description:
'Optional: The absolute path to the directory to search within. If omitted, searches the current working directory.',
type: 'string',
},
include: {
description:
"Optional: A glob pattern to filter which files are searched (e.g., '*.js', '*.{ts,tsx}', 'src/**'). If omitted, searches all files (respecting potential global ignores).",
type: 'string',
},
},
required: ['pattern'],
type: 'object',
},
GrepTool.Name,
'SearchText', // Define display name here
'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.', // Define description here
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
// Ensure rootDirectory is absolute and normalized
this.rootDirectory = path.resolve(rootDirectory);
}
// --- Validation Methods ---
/**
* Checks if a path is within the root directory and resolves it.
* @param relativePath Path relative to the root directory (or undefined for root).
* @returns The absolute path if valid and exists.
* @throws {Error} If path is outside root, doesn't exist, or isn't a directory.
*/
private resolveAndValidatePath(relativePath?: string): string {
const targetPath = path.resolve(this.rootDirectory, relativePath || '.');
// Security Check: Ensure the resolved path is still within the root directory.
if (
!targetPath.startsWith(this.rootDirectory) &&
targetPath !== this.rootDirectory
) {
throw new Error(
`Path validation failed: Attempted path "${relativePath || '.'}" resolves outside the allowed root directory "${this.rootDirectory}".`,
);
}
// Check existence and type after resolving
try {
const stats = fs.statSync(targetPath);
if (!stats.isDirectory()) {
throw new Error(`Path is not a directory: ${targetPath}`);
}
} catch (error: unknown) {
if (isNodeError(error) && error.code !== 'ENOENT') {
throw new Error(`Path does not exist: ${targetPath}`);
}
throw new Error(
`Failed to access path stats for ${targetPath}: ${error}`,
);
}
return targetPath;
this.coreLogic = coreLogicInstance;
}
/**
* Validates the parameters for the tool
* @param params Parameters to validate
* @returns An error message string if invalid, null otherwise
* Delegates validation to the core logic
*/
validateToolParams(params: GrepToolParams): string | null {
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return 'Parameters failed schema validation.';
}
try {
new RegExp(params.pattern);
} catch (error) {
return `Invalid regular expression pattern provided: ${params.pattern}. Error: ${error instanceof Error ? error.message : String(error)}`;
}
try {
this.resolveAndValidatePath(params.path);
} catch (error) {
return error instanceof Error ? error.message : String(error);
}
return null; // Parameters are valid
}
// --- Core Execution ---
/**
* Executes the grep search with the given parameters
* @param params Parameters for the grep search
* @returns Result of the grep search
*/
async execute(params: GrepToolParams): Promise<ToolResult> {
const validationError = this.validateToolParams(params);
if (validationError) {
console.error(`GrepTool Parameter Validation Failed: ${validationError}`);
return {
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
returnDisplay: `**Error:** Failed to execute tool.`,
};
}
let searchDirAbs: string;
try {
searchDirAbs = this.resolveAndValidatePath(params.path);
const searchDirDisplay = params.path || '.';
const matches: GrepMatch[] = await this.performGrepSearch({
pattern: params.pattern,
path: searchDirAbs,
include: params.include,
});
if (matches.length === 0) {
const noMatchMsg = `No matches found for pattern "${params.pattern}" in path "${searchDirDisplay}"${params.include ? ` (filter: "${params.include}")` : ''}.`;
const noMatchUser = `No matches found`;
return { llmContent: noMatchMsg, returnDisplay: noMatchUser };
}
const matchesByFile = matches.reduce(
(acc, match) => {
const relativeFilePath =
path.relative(
searchDirAbs,
path.resolve(searchDirAbs, match.filePath),
) || path.basename(match.filePath);
if (!acc[relativeFilePath]) {
acc[relativeFilePath] = [];
}
acc[relativeFilePath].push(match);
acc[relativeFilePath].sort((a, b) => a.lineNumber - b.lineNumber);
return acc;
},
{} as Record<string, GrepMatch[]>,
);
let llmContent = `Found ${matches.length} match(es) for pattern "${params.pattern}" in path "${searchDirDisplay}"${params.include ? ` (filter: "${params.include}")` : ''}:\n---\n`;
for (const filePath in matchesByFile) {
llmContent += `File: ${filePath}\n`;
matchesByFile[filePath].forEach((match) => {
const trimmedLine = match.line.trim();
llmContent += `L${match.lineNumber}: ${trimmedLine}\n`;
});
llmContent += '---\n';
}
return {
llmContent: llmContent.trim(),
returnDisplay: `Found ${matches.length} matche(s)`,
};
} catch (error) {
console.error(`Error during GrepTool execution: ${error}`);
const errorMessage =
error instanceof Error ? error.message : String(error);
return {
llmContent: `Error during grep search operation: ${errorMessage}`,
returnDisplay: errorMessage,
};
}
}
// --- Inlined Grep Logic and Helpers ---
/**
* Checks if a command is available in the system's PATH.
* @param {string} command The command name (e.g., 'git', 'grep').
* @returns {Promise<boolean>} True if the command is available, false otherwise.
*/
private isCommandAvailable(command: string): Promise<boolean> {
return new Promise((resolve) => {
const checkCommand = process.platform === 'win32' ? 'where' : 'command';
const checkArgs =
process.platform === 'win32' ? [command] : ['-v', command];
try {
const child = spawn(checkCommand, checkArgs, {
stdio: 'ignore',
shell: process.platform === 'win32',
});
child.on('close', (code) => resolve(code === 0));
child.on('error', () => resolve(false));
} catch {
resolve(false);
}
});
return this.coreLogic.validateToolParams(params);
}
/**
* Checks if a directory or its parent directories contain a .git folder.
* @param {string} dirPath Absolute path to the directory to check.
* @returns {Promise<boolean>} True if it's a Git repository, false otherwise.
*/
private async isGitRepository(dirPath: string): Promise<boolean> {
let currentPath = path.resolve(dirPath);
const root = path.parse(currentPath).root;
try {
while (true) {
const gitPath = path.join(currentPath, '.git');
try {
const stats = await fsPromises.stat(gitPath);
if (stats.isDirectory() || stats.isFile()) {
return true;
}
return false;
} catch (error: unknown) {
if (!isNodeError(error) || error.code !== 'ENOENT') {
console.error(
`Error checking for .git in ${currentPath}: ${error}`,
);
return false;
}
}
if (currentPath === root) {
break;
}
currentPath = path.dirname(currentPath);
}
} catch (error: unknown) {
console.error(
`Error traversing directory structure upwards from ${dirPath}: ${error instanceof Error ? error.message : error}`,
);
}
return false;
}
/**
* Parses the standard output of grep-like commands (git grep, system grep).
* Expects format: filePath:lineNumber:lineContent
* Handles colons within file paths and line content correctly.
* @param {string} output The raw stdout string.
* @param {string} basePath The absolute directory the search was run from, for relative paths.
* @returns {GrepMatch[]} Array of match objects.
*/
private parseGrepOutput(output: string, basePath: string): GrepMatch[] {
const results: GrepMatch[] = [];
if (!output) return results;
const lines = output.split(EOL); // Use OS-specific end-of-line
for (const line of lines) {
if (!line.trim()) continue;
// Find the index of the first colon.
const firstColonIndex = line.indexOf(':');
if (firstColonIndex === -1) {
// Malformed line: Does not contain any colon. Skip.
continue;
}
// Find the index of the second colon, searching *after* the first one.
const secondColonIndex = line.indexOf(':', firstColonIndex + 1);
if (secondColonIndex === -1) {
// Malformed line: Contains only one colon (e.g., filename:content). Skip.
// Grep output with -n should always have file:line:content.
continue;
}
// Extract parts based on the found colon indices
const filePathRaw = line.substring(0, firstColonIndex);
const lineNumberStr = line.substring(
firstColonIndex + 1,
secondColonIndex,
);
// The rest of the line, starting after the second colon, is the content.
const lineContent = line.substring(secondColonIndex + 1);
const lineNumber = parseInt(lineNumberStr, 10);
if (!isNaN(lineNumber)) {
// Resolve the raw path relative to the base path where grep ran
const absoluteFilePath = path.resolve(basePath, filePathRaw);
// Make the final path relative to the basePath for consistency
const relativeFilePath = path.relative(basePath, absoluteFilePath);
results.push({
// Use relative path, or just the filename if it's in the base path itself
filePath: relativeFilePath || path.basename(absoluteFilePath),
lineNumber,
line: lineContent, // Use the full extracted line content
});
}
// Silently ignore lines where the line number isn't parsable
}
return results;
}
/**
* Gets a description of the grep operation
* @param params Parameters for the grep operation
* @returns A string describing the grep
* Delegates getting description to the core logic
*/
getDescription(params: GrepToolParams): string {
let description = `'${params.pattern}'`;
if (params.include) {
description += ` in ${params.include}`;
}
if (params.path) {
const searchDir = params.path || this.rootDirectory;
const relativePath = makeRelative(searchDir, this.rootDirectory);
description += ` within ${shortenPath(relativePath || './')}`;
}
return description;
return this.coreLogic.getDescription(params);
}
/**
* Performs the actual search using the prioritized strategies.
* @param options Search options including pattern, absolute path, and include glob.
* @returns A promise resolving to an array of match objects.
* Define confirmation behavior (Grep likely doesn't need confirmation)
*/
private async performGrepSearch(options: {
pattern: string;
path: string; // Expects absolute path
include?: string;
}): Promise<GrepMatch[]> {
const { pattern, path: absolutePath, include } = options;
let strategyUsed = 'none'; // Keep track for potential error reporting
try {
// --- Strategy 1: git grep ---
const isGit = await this.isGitRepository(absolutePath);
const gitAvailable = isGit && (await this.isCommandAvailable('git'));
if (gitAvailable) {
strategyUsed = 'git grep';
const gitArgs = [
'grep',
'--untracked',
'-n',
'-E',
'--ignore-case',
pattern,
];
if (include) {
gitArgs.push('--', include);
}
try {
const output = await new Promise<string>((resolve, reject) => {
const child = spawn('git', gitArgs, {
cwd: absolutePath,
windowsHide: true,
});
const stdoutChunks: Buffer[] = [];
const stderrChunks: Buffer[] = [];
child.stdout.on('data', (chunk) => {
stdoutChunks.push(chunk);
});
child.stderr.on('data', (chunk) => {
stderrChunks.push(chunk);
});
child.on('error', (err) =>
reject(new Error(`Failed to start git grep: ${err.message}`)),
);
child.on('close', (code) => {
const stdoutData = Buffer.concat(stdoutChunks).toString('utf8');
const stderrData = Buffer.concat(stderrChunks).toString('utf8');
if (code === 0) resolve(stdoutData);
else if (code === 1)
resolve(''); // No matches is not an error
else
reject(
new Error(`git grep exited with code ${code}: ${stderrData}`),
);
});
});
return this.parseGrepOutput(output, absolutePath);
} catch (gitError: unknown) {
console.error(
`GrepTool: git grep strategy failed: ${getErrorMessage(gitError)}. Falling back...`,
);
}
}
// --- Strategy 2: System grep ---
const grepAvailable = await this.isCommandAvailable('grep');
if (grepAvailable) {
strategyUsed = 'system grep';
const grepArgs = ['-r', '-n', '-H', '-E'];
const commonExcludes = ['.git', 'node_modules', 'bower_components'];
commonExcludes.forEach((dir) => grepArgs.push(`--exclude-dir=${dir}`));
if (include) {
grepArgs.push(`--include=${include}`);
}
grepArgs.push(pattern);
grepArgs.push('.');
try {
const output = await new Promise<string>((resolve, reject) => {
const child = spawn('grep', grepArgs, {
cwd: absolutePath,
windowsHide: true,
});
const stdoutChunks: Buffer[] = [];
const stderrChunks: Buffer[] = [];
child.stdout.on('data', (chunk) => {
stdoutChunks.push(chunk);
});
child.stderr.on('data', (chunk) => {
const stderrStr = chunk.toString();
if (
!stderrStr.includes('Permission denied') &&
!/grep:.*: Is a directory/i.test(stderrStr)
) {
stderrChunks.push(chunk);
}
});
child.on('error', (err) =>
reject(new Error(`Failed to start system grep: ${err.message}`)),
);
child.on('close', (code) => {
const stdoutData = Buffer.concat(stdoutChunks).toString('utf8');
const stderrData = Buffer.concat(stderrChunks)
.toString('utf8')
.trim();
if (code === 0) resolve(stdoutData);
else if (code === 1)
resolve(''); // No matches
else {
if (stderrData)
reject(
new Error(
`System grep exited with code ${code}: ${stderrData}`,
),
);
else resolve('');
}
});
});
return this.parseGrepOutput(output, absolutePath);
} catch (grepError: unknown) {
console.error(
`GrepTool: System grep strategy failed: ${getErrorMessage(grepError)}. Falling back...`,
);
}
}
// --- Strategy 3: Pure JavaScript Fallback ---
strategyUsed = 'javascript fallback';
const globPattern = include ? include : '**/*';
const ignorePatterns = [
'.git',
'node_modules',
'bower_components',
'.svn',
'.hg',
];
const filesStream = fastGlob.stream(globPattern, {
cwd: absolutePath,
dot: true,
ignore: ignorePatterns,
absolute: true,
onlyFiles: true,
suppressErrors: true,
stats: false,
});
const regex = new RegExp(pattern, 'i');
const allMatches: GrepMatch[] = [];
for await (const filePath of filesStream) {
const fileAbsolutePath = filePath as string;
try {
const content = await fsPromises.readFile(fileAbsolutePath, 'utf8');
const lines = content.split(/\r?\n/);
lines.forEach((line, index) => {
if (regex.test(line)) {
allMatches.push({
filePath:
path.relative(absolutePath, fileAbsolutePath) ||
path.basename(fileAbsolutePath),
lineNumber: index + 1,
line,
});
}
});
} catch (readError: unknown) {
if (!isNodeError(readError) || readError.code !== 'ENOENT') {
console.error(
`GrepTool: Could not read or process file ${fileAbsolutePath}: ${getErrorMessage(readError)}`,
);
}
}
}
return allMatches;
} catch (error: unknown) {
console.error(
`GrepTool: Error during performGrepSearch (Strategy: ${strategyUsed}): ${getErrorMessage(error)}`,
);
throw error; // Re-throw to be caught by the execute method's handler
}
shouldConfirmExecute(
// eslint-disable-next-line @typescript-eslint/no-unused-vars
params: GrepToolParams,
): Promise<ToolCallConfirmationDetails | false> {
return Promise.resolve(false);
}
/**
* Delegates execution to the core logic
*/
async execute(params: GrepToolParams): Promise<ToolResult> {
return this.coreLogic.execute(params);
}
// Removed private methods (resolveAndValidatePath, performGrepSearch, etc.)
// as they are now part of GrepLogic in the server package.
}
+44 -236
View File
@@ -4,267 +4,75 @@
* SPDX-License-Identifier: Apache-2.0
*/
import fs from 'fs';
import path from 'path';
import { BaseTool, ToolResult } from './tools.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
// Import core logic and types from the server package
import { LSLogic, LSToolParams, ToolResult } from '@gemini-code/server';
// Import CLI-specific base class and types
import { BaseTool } from './tools.js';
import { ToolCallConfirmationDetails } from '../ui/types.js';
/**
* Parameters for the LS tool
*/
export interface LSToolParams {
/**
* The absolute path to the directory to list
*/
path: string;
/**
* List of glob patterns to ignore
*/
ignore?: string[];
}
/**
* File entry returned by LS tool
*/
export interface FileEntry {
/**
* Name of the file or directory
*/
name: string;
/**
* Absolute path to the file or directory
*/
path: string;
/**
* Whether this entry is a directory
*/
isDirectory: boolean;
/**
* Size of the file in bytes (0 for directories)
*/
size: number;
/**
* Last modified timestamp
*/
modifiedTime: Date;
}
/**
* Implementation of the LS tool that lists directory contents
* CLI wrapper for the LS tool
*/
export class LSTool extends BaseTool<LSToolParams, ToolResult> {
/**
* The root directory that this tool is grounded in.
* All path operations will be restricted to this directory.
*/
private rootDirectory: string;
static readonly Name: string = LSLogic.Name; // Use name from logic
// Core logic instance from the server package
private coreLogic: LSLogic;
/**
* Creates a new instance of the LSTool
* @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
* Creates a new instance of the LSTool CLI wrapper
* @param rootDirectory Root directory to ground this tool in.
*/
constructor(rootDirectory: string) {
// Instantiate the core logic from the server package
const coreLogicInstance = new LSLogic(rootDirectory);
// Initialize the CLI BaseTool
super(
'list_directory',
'ReadFolder',
'Lists the names of files and subdirectories directly within a specified directory path. Can optionally ignore entries matching provided glob patterns.',
{
properties: {
path: {
description:
'The absolute path to the directory to list (must be absolute, not relative)',
type: 'string',
},
ignore: {
description: 'List of glob patterns to ignore',
items: {
type: 'string',
},
type: 'array',
},
},
required: ['path'],
type: 'object',
},
LSTool.Name,
'ReadFolder', // Define display name here
'Lists the names of files and subdirectories directly within a specified directory path. Can optionally ignore entries matching provided glob patterns.', // Define description here
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
// Set the root directory
this.rootDirectory = path.resolve(rootDirectory);
this.coreLogic = coreLogicInstance;
}
/**
* Checks if a path is within the root directory
* @param dirpath The path to check
* @returns True if the path is within the root directory, false otherwise
*/
private isWithinRoot(dirpath: string): boolean {
const normalizedPath = path.normalize(dirpath);
const normalizedRoot = path.normalize(this.rootDirectory);
// Ensure the normalizedRoot ends with a path separator for proper path comparison
const rootWithSep = normalizedRoot.endsWith(path.sep)
? normalizedRoot
: normalizedRoot + path.sep;
return (
normalizedPath === normalizedRoot ||
normalizedPath.startsWith(rootWithSep)
);
}
/**
* Validates the parameters for the tool
* @param params Parameters to validate
* @returns An error message string if invalid, null otherwise
* Delegates validation to the core logic
*/
validateToolParams(params: LSToolParams): string | null {
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return 'Parameters failed schema validation.';
}
if (!path.isAbsolute(params.path)) {
return `Path must be absolute: ${params.path}`;
}
if (!this.isWithinRoot(params.path)) {
return `Path must be within the root directory (${this.rootDirectory}): ${params.path}`;
}
return null;
return this.coreLogic.validateToolParams(params);
}
/**
* Checks if a filename matches any of the ignore patterns
* @param filename Filename to check
* @param patterns Array of glob patterns to check against
* @returns True if the filename should be ignored
*/
private shouldIgnore(filename: string, patterns?: string[]): boolean {
if (!patterns || patterns.length === 0) {
return false;
}
for (const pattern of patterns) {
// Convert glob pattern to RegExp
const regexPattern = pattern
.replace(/[.+^${}()|[\]\\]/g, '\\$&')
.replace(/\*/g, '.*')
.replace(/\?/g, '.');
const regex = new RegExp(`^${regexPattern}$`);
if (regex.test(filename)) {
return true;
}
}
return false;
}
/**
* Gets a description of the file reading operation
* @param params Parameters for the file reading
* @returns A string describing the file being read
* Delegates getting description to the core logic
*/
getDescription(params: LSToolParams): string {
const relativePath = makeRelative(params.path, this.rootDirectory);
return shortenPath(relativePath);
}
private errorResult(llmContent: string, returnDisplay: string): ToolResult {
return {
llmContent,
returnDisplay: `**Error:** ${returnDisplay}`,
};
return this.coreLogic.getDescription(params);
}
/**
* Executes the LS operation with the given parameters
* @param params Parameters for the LS operation
* @returns Result of the LS operation
* Define confirmation behavior (LS likely doesn't need confirmation)
*/
shouldConfirmExecute(
// eslint-disable-next-line @typescript-eslint/no-unused-vars
params: LSToolParams,
): Promise<ToolCallConfirmationDetails | false> {
return Promise.resolve(false);
}
/**
* Delegates execution to the core logic
*/
async execute(params: LSToolParams): Promise<ToolResult> {
const validationError = this.validateToolParams(params);
if (validationError) {
return this.errorResult(
`Error: Invalid parameters provided. Reason: ${validationError}`,
`Failed to execute tool.`,
);
}
try {
const stats = fs.statSync(params.path);
if (!stats) {
return this.errorResult(
`Directory does not exist: ${params.path}`,
`Directory does not exist.`,
);
}
if (!stats.isDirectory()) {
return this.errorResult(
`Path is not a directory: ${params.path}`,
`Path is not a directory.`,
);
}
const files = fs.readdirSync(params.path);
const entries: FileEntry[] = [];
if (files.length === 0) {
return this.errorResult(
`Directory is empty: ${params.path}`,
`Directory is empty.`,
);
}
for (const file of files) {
if (this.shouldIgnore(file, params.ignore)) {
continue;
}
const fullPath = path.join(params.path, file);
try {
const stats = fs.statSync(fullPath);
const isDir = stats.isDirectory();
entries.push({
name: file,
path: fullPath,
isDirectory: isDir,
size: isDir ? 0 : stats.size,
modifiedTime: stats.mtime,
});
} catch (error) {
console.error(`Error accessing ${fullPath}: ${error}`);
}
}
// Sort entries (directories first, then alphabetically)
entries.sort((a, b) => {
if (a.isDirectory && !b.isDirectory) return -1;
if (!a.isDirectory && b.isDirectory) return 1;
return a.name.localeCompare(b.name);
});
// Create formatted content for display
const directoryContent = entries
.map((entry) => {
const typeIndicator = entry.isDirectory ? 'd' : '-';
const sizeInfo = entry.isDirectory ? '' : ` (${entry.size} bytes)`;
return `${typeIndicator} ${entry.name}${sizeInfo}`;
})
.join('\n');
return {
llmContent: `Directory listing for ${params.path}:\n${directoryContent}`,
returnDisplay: `Found ${entries.length} item(s).`,
};
} catch (error) {
return this.errorResult(
`Error listing directory: ${error instanceof Error ? error.message : String(error)}`,
'Failed to list directory.',
);
}
// The CLI wrapper could potentially modify the returnDisplay
// from the core logic if needed, but for LS, the core logic's
// display might be sufficient.
return this.coreLogic.execute(params);
}
// Removed private methods (isWithinRoot, shouldIgnore, errorResult)
// as they are now part of LSLogic in the server package.
}
+29 -252
View File
@@ -4,288 +4,65 @@
* SPDX-License-Identifier: Apache-2.0
*/
import fs from 'fs';
import path from 'path';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import { BaseTool, ToolResult } from './tools.js';
import {
ReadFileLogic,
ReadFileToolParams,
ToolResult,
} from '@gemini-code/server';
import { BaseTool } from './tools.js';
import { ToolCallConfirmationDetails } from '../ui/types.js';
/**
* Parameters for the ReadFile tool
*/
export interface ReadFileToolParams {
/**
* The absolute path to the file to read
*/
file_path: string;
/**
* The line number to start reading from (optional)
*/
offset?: number;
/**
* The number of lines to read (optional)
*/
limit?: number;
}
/**
* Implementation of the ReadFile tool that reads files from the filesystem
* CLI wrapper for the ReadFile tool
*/
export class ReadFileTool extends BaseTool<ReadFileToolParams, ToolResult> {
static readonly Name: string = 'read_file';
// Maximum number of lines to read by default
private static readonly DEFAULT_MAX_LINES = 2000;
// Maximum length of a line before truncating
private static readonly MAX_LINE_LENGTH = 2000;
static readonly Name: string = ReadFileLogic.Name;
private coreLogic: ReadFileLogic;
/**
* The root directory that this tool is grounded in.
* All file operations will be restricted to this directory.
*/
private rootDirectory: string;
/**
* Creates a new instance of the ReadFileTool
* @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
* Creates a new instance of the ReadFileTool CLI wrapper
* @param rootDirectory Root directory to ground this tool in.
*/
constructor(rootDirectory: string) {
const coreLogicInstance = new ReadFileLogic(rootDirectory);
super(
ReadFileTool.Name,
'ReadFile',
'Reads and returns the content of a specified file from the local filesystem. Handles large files by allowing reading specific line ranges.',
{
properties: {
file_path: {
description:
"The absolute path to the file to read (e.g., '/home/user/project/file.txt'). Relative paths are not supported.",
type: 'string',
},
offset: {
description:
"Optional: The 0-based line number to start reading from. Requires 'limit' to be set. Use for paginating through large files.",
type: 'number',
},
limit: {
description:
"Optional: Maximum number of lines to read. Use with 'offset' to paginate through large files. If omitted, reads the entire file (if feasible).",
type: 'number',
},
},
required: ['file_path'],
type: 'object',
},
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
// Set the root directory
this.rootDirectory = path.resolve(rootDirectory);
this.coreLogic = coreLogicInstance;
}
/**
* Checks if a path is within the root directory
* @param pathToCheck The path to check
* @returns True if the path is within the root directory, false otherwise
* Delegates validation to the core logic
*/
private isWithinRoot(pathToCheck: string): boolean {
const normalizedPath = path.normalize(pathToCheck);
const normalizedRoot = path.normalize(this.rootDirectory);
// Ensure the normalizedRoot ends with a path separator for proper path comparison
const rootWithSep = normalizedRoot.endsWith(path.sep)
? normalizedRoot
: normalizedRoot + path.sep;
return (
normalizedPath === normalizedRoot ||
normalizedPath.startsWith(rootWithSep)
);
}
/**
* Validates the parameters for the ReadFile tool
* @param params Parameters to validate
* @returns True if parameters are valid, false otherwise
*/
validateToolParams(params: ReadFileToolParams): string | null {
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return 'Parameters failed schema validation.';
}
const filePath = params.file_path;
if (!path.isAbsolute(filePath)) {
return `File path must be absolute: ${filePath}`;
}
if (!this.isWithinRoot(filePath)) {
return `File path must be within the root directory (${this.rootDirectory}): ${filePath}`;
}
if (params.offset !== undefined && params.offset < 0) {
return 'Offset must be a non-negative number';
}
if (params.limit !== undefined && params.limit <= 0) {
return 'Limit must be a positive number';
}
validateToolParams(_params: ReadFileToolParams): string | null {
// Currently allowing any path. Add validation if needed.
return null;
}
/**
* Determines if a file is likely binary based on content sampling
* @param filePath Path to the file
* @returns True if the file appears to be binary
* Delegates getting description to the core logic
*/
private isBinaryFile(filePath: string): boolean {
try {
// Read the first 4KB of the file
const fd = fs.openSync(filePath, 'r');
const buffer = Buffer.alloc(4096);
const bytesRead = fs.readSync(fd, buffer, 0, 4096, 0);
fs.closeSync(fd);
// Check for null bytes or high concentration of non-printable characters
let nonPrintableCount = 0;
for (let i = 0; i < bytesRead; i++) {
// Null byte is a strong indicator of binary data
if (buffer[i] === 0) {
return true;
}
// Count non-printable characters
if (buffer[i] < 9 || (buffer[i] > 13 && buffer[i] < 32)) {
nonPrintableCount++;
}
}
// If more than 30% are non-printable, likely binary
return nonPrintableCount / bytesRead > 0.3;
} catch {
return false;
}
getDescription(_params: ReadFileToolParams): string {
return this.coreLogic.getDescription(_params);
}
/**
* Detects the type of file based on extension and content
* @param filePath Path to the file
* @returns File type description
* Define confirmation behavior here in the CLI wrapper if needed
* For ReadFile, we likely don't need confirmation.
*/
private detectFileType(filePath: string): string {
const ext = path.extname(filePath).toLowerCase();
// Common image formats
if (
['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg'].includes(ext)
) {
return 'image';
}
// Other known binary formats
if (['.pdf', '.zip', '.tar', '.gz', '.exe', '.dll', '.so'].includes(ext)) {
return 'binary';
}
// Check content for binary indicators
if (this.isBinaryFile(filePath)) {
return 'binary';
}
return 'text';
shouldConfirmExecute(
_params: ReadFileToolParams,
): Promise<ToolCallConfirmationDetails | false> {
return Promise.resolve(false);
}
/**
* Gets a description of the file reading operation
* @param params Parameters for the file reading
* @returns A string describing the file being read
*/
getDescription(params: ReadFileToolParams): string {
const relativePath = makeRelative(params.file_path, this.rootDirectory);
return shortenPath(relativePath);
}
/**
* Reads a file and returns its contents with line numbers
* @param params Parameters for the file reading
* @returns Result with file contents
* Delegates execution to the core logic
*/
async execute(params: ReadFileToolParams): Promise<ToolResult> {
const validationError = this.validateToolParams(params);
if (validationError) {
return {
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
returnDisplay: '**Error:** Failed to execute tool.',
};
}
const filePath = params.file_path;
try {
if (!fs.existsSync(filePath)) {
return {
llmContent: `File not found: ${filePath}`,
returnDisplay: `File not found.`,
};
}
const stats = fs.statSync(filePath);
if (stats.isDirectory()) {
return {
llmContent: `Path is a directory, not a file: ${filePath}`,
returnDisplay: `File is directory.`,
};
}
const fileType = this.detectFileType(filePath);
if (fileType !== 'text') {
return {
llmContent: `Binary file: ${filePath} (${fileType})`,
returnDisplay: ``,
};
}
const content = fs.readFileSync(filePath, 'utf8');
const lines = content.split('\n');
const startLine = params.offset || 0;
const endLine = params.limit
? startLine + params.limit
: Math.min(startLine + ReadFileTool.DEFAULT_MAX_LINES, lines.length);
const selectedLines = lines.slice(startLine, endLine);
let truncated = false;
const formattedLines = selectedLines.map((line) => {
let processedLine = line;
if (line.length > ReadFileTool.MAX_LINE_LENGTH) {
processedLine =
line.substring(0, ReadFileTool.MAX_LINE_LENGTH) + '... [truncated]';
truncated = true;
}
return processedLine;
});
const contentTruncated = endLine < lines.length || truncated;
let llmContent = '';
if (contentTruncated) {
llmContent += `[File truncated: showing lines ${startLine + 1}-${endLine} of ${lines.length} total lines. Use offset parameter to view more.]\n`;
}
llmContent += formattedLines.join('\n');
return {
llmContent,
returnDisplay: '',
};
} catch (error) {
const errorMsg = `Error reading file: ${error instanceof Error ? error.message : String(error)}`;
return {
llmContent: `Error reading file ${filePath}: ${errorMsg}`,
returnDisplay: `Failed to read file: ${errorMsg}`,
};
}
return this.coreLogic.execute(params);
}
}
File diff suppressed because it is too large Load Diff
+19 -9
View File
@@ -25,27 +25,37 @@ class ToolRegistry {
}
/**
* Retrieves the list of tool schemas in the format required by Gemini.
* @returns A ToolListUnion containing the function declarations.
* Retrieves the list of tool schemas (FunctionDeclaration array).
* Extracts the declarations from the ToolListUnion structure.
* @returns An array of FunctionDeclarations.
*/
getToolSchemas(): ToolListUnion {
getFunctionDeclarations(): FunctionDeclaration[] {
const declarations: FunctionDeclaration[] = [];
this.tools.forEach((tool) => {
declarations.push(tool.schema);
});
return declarations;
}
// Return Gemini's expected format. Handle the case of no tools.
/**
* Deprecated/Internal? Retrieves schemas in the ToolListUnion format.
* Kept for reference, prefer getFunctionDeclarations.
*/
getToolSchemas(): ToolListUnion {
const declarations = this.getFunctionDeclarations();
if (declarations.length === 0) {
// Depending on the SDK version, you might need `undefined`, `[]`, or `[{ functionDeclarations: [] }]`
// Check the documentation for your @google/genai version.
// Let's assume an empty array works or signifies no tools.
return [];
// Or if it requires the structure:
// return [{ functionDeclarations: [] }];
}
return [{ functionDeclarations: declarations }];
}
/**
* Returns an array of all registered tool instances.
*/
getAllTools(): Tool[] {
return Array.from(this.tools.values());
}
/**
* Optional: Get a list of registered tool names.
*/
+28 -136
View File
@@ -4,169 +4,61 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { SchemaValidator } from '../utils/schemaValidator.js';
import { BaseTool, ToolResult } from './tools.js';
import { ToolCallConfirmationDetails } from '../ui/types.js'; // Added for shouldConfirmExecute
import { getErrorMessage } from '../utils/errors.js';
// Import core logic and types from the server package
import {
WebFetchLogic,
WebFetchToolParams,
ToolResult,
} from '@gemini-code/server';
// Import CLI-specific base class and UI types
import { BaseTool } from './tools.js';
import { ToolCallConfirmationDetails } from '../ui/types.js';
/**
* Parameters for the WebFetch tool
*/
export interface WebFetchToolParams {
/**
* The URL to fetch content from.
*/
url: string;
}
/**
* Implementation of the WebFetch tool that reads content from a URL.
* CLI wrapper for the WebFetch tool.
*/
export class WebFetchTool extends BaseTool<WebFetchToolParams, ToolResult> {
static readonly Name: string = 'web_fetch';
static readonly Name: string = WebFetchLogic.Name; // Use name from logic
// Core logic instance from the server package
private coreLogic: WebFetchLogic;
/**
* Creates a new instance of the WebFetchTool
*/
constructor() {
const coreLogicInstance = new WebFetchLogic();
super(
WebFetchTool.Name,
'WebFetch',
'Fetches text content from a given URL. Handles potential network errors and non-success HTTP status codes.',
{
properties: {
url: {
description:
"The URL to fetch. Must be an absolute URL (e.g., 'https://example.com/file.txt').",
type: 'string',
},
},
required: ['url'],
type: 'object',
},
'WebFetch', // Define display name here
'Fetches text content from a given URL. Handles potential network errors and non-success HTTP status codes.', // Define description here
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
// No rootDirectory needed for web fetching
this.coreLogic = coreLogicInstance;
}
/**
* Validates the parameters for the WebFetch tool
* @param params Parameters to validate
* @returns An error message string if invalid, null otherwise
*/
invalidParams(params: WebFetchToolParams): string | null {
// 1. Validate against the basic schema first
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return 'Parameters failed schema validation.';
}
// 2. Validate the URL format and protocol
try {
const parsedUrl = new URL(params.url);
// Ensure it's an HTTP or HTTPS URL
if (!['http:', 'https:'].includes(parsedUrl.protocol)) {
return `Invalid URL protocol: "${parsedUrl.protocol}". Only 'http:' and 'https:' are supported.`;
}
} catch {
// The URL constructor throws if the format is invalid
return `Invalid URL format: "${params.url}". Please provide a valid absolute URL (e.g., 'https://example.com').`;
}
// If all checks pass, the parameters are valid
return null;
validateToolParams(params: WebFetchToolParams): string | null {
// Delegate validation to core logic
return this.coreLogic.validateParams(params);
}
/**
* Gets a description of the web fetch operation.
* @param params Parameters for the web fetch.
* @returns A string describing the operation.
*/
getDescription(params: WebFetchToolParams): string {
// Shorten long URLs for display
const displayUrl =
params.url.length > 80 ? params.url.substring(0, 77) + '...' : params.url;
return `Fetching content from ${displayUrl}`;
// Delegate description generation to core logic
return this.coreLogic.getDescription(params);
}
/**
* Determines if the tool should prompt for confirmation before execution.
* Web fetches are generally safe, so default to false.
* @param params Parameters for the tool execution
* @returns Whether execute should be confirmed.
* Define confirmation behavior (WebFetch likely doesn't need confirmation)
*/
async shouldConfirmExecute(
// eslint-disable-next-line @typescript-eslint/no-unused-vars
params: WebFetchToolParams,
): Promise<ToolCallConfirmationDetails | false> {
// Could add logic here to confirm based on domain, etc. if needed
return Promise.resolve(false);
}
/**
* Fetches content from the specified URL.
* @param params Parameters for the web fetch operation.
* @returns Result with the fetched content or an error message.
* Delegates execution to the core logic.
*/
async execute(params: WebFetchToolParams): Promise<ToolResult> {
const validationError = this.invalidParams(params);
if (validationError) {
return {
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
returnDisplay: `**Error:** Invalid parameters. ${validationError}`,
};
}
const url = params.url;
try {
const response = await fetch(url, {
headers: {
'User-Agent': 'GeminiCode-CLI/1.0',
},
signal: AbortSignal.timeout(15000), // 15 seconds timeout
});
if (!response.ok) {
// fetch doesn't throw on bad HTTP status codes (4xx, 5xx)
const errorText = `Failed to fetch data from ${url}. Status: ${response.status} ${response.statusText}`;
return {
llmContent: `Error: ${errorText}`,
returnDisplay: `**Error:** ${errorText}`,
};
}
// Assuming the response is text. Add checks for content-type if needed.
const data = await response.text();
let llmContent = '';
// Truncate very large responses for the LLM context
const MAX_LLM_CONTENT_LENGTH = 200000;
if (data) {
llmContent = `Fetched data from ${url}:\n\n${
data.length > MAX_LLM_CONTENT_LENGTH
? data.substring(0, MAX_LLM_CONTENT_LENGTH) +
'\n... [Content truncated]'
: data
}`;
} else {
llmContent = `No data fetched from ${url}. Status: ${response.status}`;
}
return {
llmContent,
returnDisplay: `Fetched content from ${url}`, // Simple display message
};
} catch (error: unknown) {
// This catches network errors (DNS resolution, connection refused, etc.)
// and errors from the URL constructor if somehow bypassed validation (unlikely)
const errorMessage = `Failed to fetch data from ${url}. Error: ${getErrorMessage(error)}`;
return {
llmContent: `Error: ${errorMessage}`,
returnDisplay: `**Error:** ${errorMessage}`,
};
}
return this.coreLogic.execute(params);
}
}
+36 -141
View File
@@ -6,127 +6,51 @@
import fs from 'fs';
import path from 'path';
import { BaseTool, ToolResult } from './tools.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
import * as Diff from 'diff';
import {
WriteFileLogic,
WriteFileToolParams,
ToolResult,
makeRelative,
shortenPath,
} from '@gemini-code/server';
import { BaseTool } from './tools.js';
import {
ToolCallConfirmationDetails,
ToolConfirmationOutcome,
ToolEditConfirmationDetails,
} from '../ui/types.js';
import * as Diff from 'diff';
/**
* Parameters for the WriteFile tool
*/
export interface WriteFileToolParams {
/**
* The absolute path to the file to write to
*/
file_path: string;
/**
* The content to write to the file
*/
content: string;
}
/**
* Implementation of the WriteFile tool that writes files to the filesystem
* CLI wrapper for the WriteFile tool.
*/
export class WriteFileTool extends BaseTool<WriteFileToolParams, ToolResult> {
static readonly Name: string = 'write_file';
static readonly Name: string = WriteFileLogic.Name;
private shouldAlwaysWrite = false;
/**
* The root directory that this tool is grounded in.
* All file operations will be restricted to this directory.
*/
private rootDirectory: string;
private coreLogic: WriteFileLogic;
/**
* Creates a new instance of the WriteFileTool
* @param rootDirectory Root directory to ground this tool in. All operations will be restricted to this directory.
*/
constructor(rootDirectory: string) {
const coreLogicInstance = new WriteFileLogic(rootDirectory);
super(
WriteFileTool.Name,
'WriteFile',
'Writes content to a specified file in the local filesystem.',
{
properties: {
filePath: {
description:
"The absolute path to the file to write to (e.g., '/home/user/project/file.txt'). Relative paths are not supported.",
type: 'string',
},
content: {
description: 'The content to write to the file.',
type: 'string',
},
},
required: ['filePath', 'content'],
type: 'object',
},
(coreLogicInstance.schema.parameters as Record<string, unknown>) ?? {},
);
// Set the root directory
this.rootDirectory = path.resolve(rootDirectory);
this.coreLogic = coreLogicInstance;
}
/**
* Checks if a path is within the root directory
* @param pathToCheck The path to check
* @returns True if the path is within the root directory, false otherwise
*/
private isWithinRoot(pathToCheck: string): boolean {
const normalizedPath = path.normalize(pathToCheck);
const normalizedRoot = path.normalize(this.rootDirectory);
// Ensure the normalizedRoot ends with a path separator for proper path comparison
const rootWithSep = normalizedRoot.endsWith(path.sep)
? normalizedRoot
: normalizedRoot + path.sep;
return (
normalizedPath === normalizedRoot ||
normalizedPath.startsWith(rootWithSep)
);
}
/**
* Validates the parameters for the WriteFile tool
* @param params Parameters to validate
* @returns True if parameters are valid, false otherwise
*/
validateToolParams(params: WriteFileToolParams): string | null {
if (
this.schema.parameters &&
!SchemaValidator.validate(
this.schema.parameters as Record<string, unknown>,
params,
)
) {
return 'Parameters failed schema validation.';
}
return this.coreLogic.validateParams(params);
}
// Ensure path is absolute
if (!path.isAbsolute(params.file_path)) {
return `File path must be absolute: ${params.file_path}`;
}
// Ensure path is within the root directory
if (!this.isWithinRoot(params.file_path)) {
return `File path must be within the root directory (${this.rootDirectory}): ${params.file_path}`;
}
return null;
getDescription(params: WriteFileToolParams): string {
return this.coreLogic.getDescription(params);
}
/**
* Determines if the tool should prompt for confirmation before execution
* @param params Parameters for the tool execution
* @returns Whether or not execute should be confirmed by the user.
* Handles the confirmation prompt for the WriteFile tool in the CLI.
*/
async shouldConfirmExecute(
params: WriteFileToolParams,
@@ -135,14 +59,25 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, ToolResult> {
return false;
}
const relativePath = makeRelative(params.file_path, this.rootDirectory);
const validationError = this.validateToolParams(params);
if (validationError) {
console.error(
`[WriteFile Wrapper] Attempted confirmation with invalid parameters: ${validationError}`,
);
return false;
}
const relativePath = makeRelative(
params.file_path,
this.coreLogic['rootDirectory'],
);
const fileName = path.basename(params.file_path);
let currentContent = '';
try {
currentContent = fs.readFileSync(params.file_path, 'utf8');
} catch {
// File may not exist, which is fine
// File might not exist, that's okay for write/create
}
const fileDiff = Diff.createPatch(
@@ -151,7 +86,7 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, ToolResult> {
params.content,
'Current',
'Proposed',
{ context: 3, ignoreWhitespace: true },
{ context: 3 },
);
const confirmationDetails: ToolEditConfirmationDetails = {
@@ -168,49 +103,9 @@ export class WriteFileTool extends BaseTool<WriteFileToolParams, ToolResult> {
}
/**
* Gets a description of the file writing operation
* @param params Parameters for the file writing
* @returns A string describing the file being written to
*/
getDescription(params: WriteFileToolParams): string {
const relativePath = makeRelative(params.file_path, this.rootDirectory);
return `Writing to ${shortenPath(relativePath)}`;
}
/**
* Executes the file writing operation
* @param params Parameters for the file writing
* @returns Result of the file writing operation
* Delegates execution to the core logic.
*/
async execute(params: WriteFileToolParams): Promise<ToolResult> {
const validationError = this.validateToolParams(params);
if (validationError) {
return {
llmContent: `Error: Invalid parameters provided. Reason: ${validationError}`,
returnDisplay: '**Error:** Failed to execute tool.',
};
}
try {
// Ensure parent directories exist
const dirName = path.dirname(params.file_path);
if (!fs.existsSync(dirName)) {
fs.mkdirSync(dirName, { recursive: true });
}
// Write the file
fs.writeFileSync(params.file_path, params.content, 'utf8');
return {
llmContent: `Successfully wrote to file: ${params.file_path}`,
returnDisplay: `Wrote to ${shortenPath(makeRelative(params.file_path, this.rootDirectory))}`,
};
} catch (error) {
const errorMsg = `Error writing to file: ${error instanceof Error ? error.message : String(error)}`;
return {
llmContent: `Error writing to file ${params.file_path}: ${errorMsg}`,
returnDisplay: `Failed to write to file: ${errorMsg}`,
};
}
return this.coreLogic.execute(params);
}
}
+11 -20
View File
@@ -22,16 +22,20 @@ import {
useStartupWarnings,
useInitializationErrorEffect,
} from './hooks/useAppEffects.js';
import type { Config } from '@gemini-code/server';
interface AppProps {
directory: string;
config: Config;
}
export const App = ({ directory }: AppProps) => {
export const App = ({ config }: AppProps) => {
const [history, setHistory] = useState<HistoryItem[]>([]);
const [startupWarnings, setStartupWarnings] = useState<string[]>([]);
const { streamingState, submitQuery, initError } =
useGeminiStream(setHistory);
const { streamingState, submitQuery, initError } = useGeminiStream(
setHistory,
config.getApiKey(),
config.getModel(),
);
const { elapsedTime, currentLoadingPhrase } =
useLoadingIndicator(streamingState);
@@ -61,12 +65,7 @@ export const App = ({ directory }: AppProps) => {
!initError &&
!isWaitingForToolConfirmation;
const {
query,
setQuery,
handleSubmit: handleHistorySubmit,
inputKey,
} = useInputHistory({
const { query, handleSubmit: handleHistorySubmit } = useInputHistory({
userMessages,
onSubmit: submitQuery,
isActive: isInputActive,
@@ -74,7 +73,7 @@ export const App = ({ directory }: AppProps) => {
return (
<Box flexDirection="column" padding={1} marginBottom={1} width="100%">
<Header cwd={directory} />
<Header cwd={config.getTargetDir()} />
{startupWarnings.length > 0 && (
<Box
@@ -135,15 +134,7 @@ export const App = ({ directory }: AppProps) => {
/>
</Box>
{isInputActive && (
<InputPrompt
query={query}
setQuery={setQuery}
onSubmit={handleHistorySubmit}
isActive={isInputActive}
forceKey={inputKey}
/>
)}
{isInputActive && <InputPrompt onSubmit={handleHistorySubmit} />}
<Footer queryLength={query.length} />
<ITermDetectionWarning />
+1 -1
View File
@@ -7,7 +7,7 @@
import React from 'react';
import { Box, Text } from 'ink';
import { UI_WIDTH, BOX_PADDING_X } from '../constants.js';
import { shortenPath } from '../../utils/paths.js';
import { shortenPath } from '@gemini-code/server';
interface HeaderProps {
cwd: string;
+29 -27
View File
@@ -5,41 +5,43 @@
*/
import React from 'react';
import { Box, Text } from 'ink';
import { Box, useInput, useFocus } from 'ink';
import TextInput from 'ink-text-input';
import { globalConfig } from '../../config/config.js';
interface InputPromptProps {
query: string;
setQuery: (value: string) => void;
onSubmit: (value: string) => void;
isActive: boolean;
forceKey?: number;
}
export const InputPrompt: React.FC<InputPromptProps> = ({
query,
setQuery,
onSubmit,
isActive,
forceKey,
}) => {
const model = globalConfig.getModel();
export const InputPrompt: React.FC<InputPromptProps> = ({ onSubmit }) => {
const [value, setValue] = React.useState('');
const { isFocused } = useFocus({ autoFocus: true });
useInput(
(input, key) => {
if (key.return) {
if (value.trim()) {
onSubmit(value);
setValue('');
}
}
},
{ isActive: isFocused },
);
return (
<Box marginTop={1} borderStyle="round" borderColor={'white'} paddingX={1}>
<Text color={'white'}>&gt; </Text>
<Box flexGrow={1}>
<TextInput
key={forceKey?.toString()}
value={query}
onChange={setQuery}
onSubmit={onSubmit}
showCursor={true}
focus={isActive}
placeholder={`Ask Gemini (${model})... (try "/init" or "/help")`}
/>
</Box>
<Box
borderStyle="round"
borderColor={isFocused ? 'blue' : 'gray'}
paddingX={1}
>
<TextInput
value={value}
onChange={setValue}
placeholder="Enter your message or use tools..."
onSubmit={() => {
/* Empty to prevent double submission */
}}
/>
</Box>
);
};
@@ -30,10 +30,12 @@ export const ToolGroupMessage: React.FC<ToolGroupMessageProps> = ({
<React.Fragment key={tool.callId}>
<ToolMessage
key={tool.callId} // Use callId as the key
callId={tool.callId} // Pass callId
name={tool.name}
description={tool.description}
resultDisplay={tool.resultDisplay}
status={tool.status}
confirmationDetails={tool.confirmationDetails} // Pass confirmationDetails
/>
{tool.status === ToolCallStatus.Confirming &&
tool.confirmationDetails && (
@@ -7,70 +7,110 @@
import React from 'react';
import { Box, Text } from 'ink';
import Spinner from 'ink-spinner';
import { ToolCallStatus } from '../../types.js';
import { ToolResultDisplay } from '../../../tools/tools.js';
import {
IndividualToolCallDisplay,
ToolCallStatus,
ToolCallConfirmationDetails,
ToolEditConfirmationDetails,
ToolExecuteConfirmationDetails,
} from '../../types.js';
import { DiffRenderer } from './DiffRenderer.js';
import { MarkdownRenderer } from '../../utils/MarkdownRenderer.js';
import { FileDiff, ToolResultDisplay } from '../../../tools/tools.js';
interface ToolMessageProps {
name: string;
description: string;
resultDisplay: ToolResultDisplay | undefined;
status: ToolCallStatus;
}
export const ToolMessage: React.FC<ToolMessageProps> = ({
export const ToolMessage: React.FC<IndividualToolCallDisplay> = ({
callId,
name,
description,
resultDisplay,
status,
confirmationDetails,
}) => {
const statusIndicatorWidth = 3;
const hasResult =
(status === ToolCallStatus.Invoked || status === ToolCallStatus.Canceled) &&
resultDisplay &&
resultDisplay.toString().trim().length > 0;
// Explicitly type the props to help the type checker
const typedConfirmationDetails = confirmationDetails as
| ToolCallConfirmationDetails
| undefined;
const typedResultDisplay = resultDisplay as ToolResultDisplay | undefined;
let color = 'gray';
let prefix = '';
switch (status) {
case ToolCallStatus.Pending:
prefix = 'Pending:';
break;
case ToolCallStatus.Invoked:
prefix = 'Executing:';
break;
case ToolCallStatus.Confirming:
color = 'yellow';
prefix = 'Confirm:';
break;
case ToolCallStatus.Success:
color = 'green';
prefix = 'Success:';
break;
case ToolCallStatus.Error:
color = 'red';
prefix = 'Error:';
break;
default:
// Handle unexpected status if necessary, or just break
break;
}
const title = `${prefix} ${name}`;
return (
<Box paddingX={1} paddingY={0} flexDirection="column">
{/* Row for Status Indicator and Tool Info */}
<Box minHeight={1}>
{/* Status Indicator */}
<Box minWidth={statusIndicatorWidth}>
{status === ToolCallStatus.Pending && <Spinner type="dots" />}
{status === ToolCallStatus.Invoked && <Text color="green"></Text>}
{status === ToolCallStatus.Confirming && <Text color="blue">?</Text>}
{status === ToolCallStatus.Canceled && (
<Text color="red" bold>
-
<Box key={callId} borderStyle="round" paddingX={1} flexDirection="column">
<Box>
{status === ToolCallStatus.Invoked && (
<Box marginRight={1}>
<Text color="blue">
<Spinner type="dots" />
</Text>
</Box>
)}
<Text bold color={color}>
{title}
</Text>
<Text color={color}>
{status === ToolCallStatus.Error && typedResultDisplay
? `: ${typedResultDisplay}`
: ` - ${description}`}
</Text>
</Box>
{status === ToolCallStatus.Confirming && typedConfirmationDetails && (
<Box flexDirection="column" marginLeft={2}>
{/* Display diff for edit/write */}
{'fileDiff' in typedConfirmationDetails && (
<DiffRenderer
diffContent={
(typedConfirmationDetails as ToolEditConfirmationDetails)
.fileDiff
}
/>
)}
{/* Display command for execute */}
{'command' in typedConfirmationDetails && (
<Text color="yellow">
Command:{' '}
{
(typedConfirmationDetails as ToolExecuteConfirmationDetails)
.command
}
</Text>
)}
{/* <ConfirmInput onConfirm={handleConfirm} isFocused={isFocused} /> */}
</Box>
<Box>
<Text
color="blue"
wrap="truncate-end"
strikethrough={status === ToolCallStatus.Canceled}
>
<Text bold>{name}</Text> <Text color="gray">{description}</Text>
</Text>
</Box>
</Box>
{hasResult && (
<Box paddingLeft={statusIndicatorWidth}>
<Box flexShrink={1} flexDirection="row">
<Text color="gray"> </Text>
{/* Use default text color (white) or gray instead of dimColor */}
{typeof resultDisplay === 'string' && (
<Box flexDirection="column">
{MarkdownRenderer.render(resultDisplay)}
</Box>
)}
{typeof resultDisplay === 'object' && (
<DiffRenderer diffContent={resultDisplay.fileDiff} />
)}
</Box>
)}
{status === ToolCallStatus.Success && typedResultDisplay && (
<Box flexDirection="column" marginLeft={2}>
{typeof typedResultDisplay === 'string' ? (
<Text>{typedResultDisplay}</Text>
) : (
<DiffRenderer
diffContent={(typedResultDisplay as FileDiff).fileDiff}
/>
)}
</Box>
)}
</Box>
+2 -2
View File
@@ -8,8 +8,8 @@ import { useEffect } from 'react';
import fs from 'fs';
import path from 'path';
import os from 'os';
import type { HistoryItem } from '../types.js';
import { getErrorMessage } from '../../utils/errors.js';
import { HistoryItem } from '../types.js';
import { getErrorMessage } from '@gemini-code/server';
const warningsFilePath = path.join(os.tmpdir(), 'gemini-code-cli-warnings.txt');
+322 -95
View File
@@ -4,20 +4,30 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { exec } from 'child_process';
import { exec as _exec } from 'child_process';
import { useState, useRef, useCallback, useEffect } from 'react';
import { useInput } from 'ink';
import { GeminiClient } from '../../core/gemini-client.js';
import { type Chat, type PartListUnion } from '@google/genai';
import { HistoryItem } from '../types.js';
// Import server-side client and types
import {
processGeminiStream,
StreamingState,
} from '../../core/gemini-stream.js';
import { globalConfig } from '../../config/config.js';
import { getErrorMessage, isNodeError } from '../../utils/errors.js';
GeminiClient,
GeminiEventType as ServerGeminiEventType, // Rename to avoid conflict
getErrorMessage,
isNodeError,
ToolResult,
} from '@gemini-code/server';
import type { Chat, PartListUnion, FunctionDeclaration } from '@google/genai';
// Import CLI types
import {
HistoryItem,
IndividualToolCallDisplay,
ToolCallStatus,
} from '../types.js';
import { Tool } from '../../tools/tools.js'; // CLI Tool definition
import { StreamingState } from '../../core/gemini-stream.js';
// Import CLI tool registry
import { toolRegistry } from '../../tools/tool-registry.js';
const allowlistedCommands = ['ls']; // TODO: make this configurable
const _allowlistedCommands = ['ls']; // Prefix with underscore since it's unused
const addHistoryItem = (
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
@@ -30,32 +40,36 @@ const addHistoryItem = (
]);
};
// Hook now accepts apiKey and model
export const useGeminiStream = (
setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
apiKey: string,
model: string,
) => {
const [streamingState, setStreamingState] = useState<StreamingState>(
StreamingState.Idle,
);
const [initError, setInitError] = useState<string | null>(null);
const abortControllerRef = useRef<AbortController | null>(null);
const currentToolGroupIdRef = useRef<number | null>(null);
const chatSessionRef = useRef<Chat | null>(null);
const geminiClientRef = useRef<GeminiClient | null>(null);
const messageIdCounterRef = useRef(0);
const currentGeminiMessageIdRef = useRef<number | null>(null);
// Initialize Client Effect (remains the same)
// Initialize Client Effect - uses props now
useEffect(() => {
setInitError(null);
if (!geminiClientRef.current) {
try {
geminiClientRef.current = new GeminiClient(globalConfig);
geminiClientRef.current = new GeminiClient(apiKey, model);
} catch (error: unknown) {
setInitError(
`Failed to initialize client: ${getErrorMessage(error) || 'Unknown error'}`,
);
}
}
}, []);
// Dependency array includes apiKey and model now
}, [apiKey, model]);
// Input Handling Effect (remains the same)
useInput((input, key) => {
@@ -70,17 +84,25 @@ export const useGeminiStream = (
return baseTimestamp + messageIdCounterRef.current;
}, []);
// Submit Query Callback (updated to call processGeminiStream)
// Helper function to update Gemini message content
const updateGeminiMessage = useCallback(
(messageId: number, newContent: string) => {
setHistory((prevHistory) =>
prevHistory.map((item) =>
item.id === messageId && item.type === 'gemini'
? { ...item, text: newContent }
: item,
),
);
},
[setHistory],
);
// Improved submit query function
const submitQuery = useCallback(
async (query: PartListUnion) => {
if (streamingState === StreamingState.Responding) {
// No-op if already going.
return;
}
if (typeof query === 'string' && query.toString().trim().length === 0) {
return;
}
if (streamingState === StreamingState.Responding) return;
if (typeof query === 'string' && query.trim().length === 0) return;
const userMessageTimestamp = Date.now();
const client = geminiClientRef.current;
@@ -90,101 +112,306 @@ export const useGeminiStream = (
}
if (!chatSessionRef.current) {
chatSessionRef.current = await client.startChat();
}
// Reset state
setStreamingState(StreamingState.Responding);
setInitError(null);
currentToolGroupIdRef.current = null;
messageIdCounterRef.current = 0;
const chat = chatSessionRef.current;
try {
// Add user message
if (typeof query === 'string') {
const trimmedQuery = query.toString();
addHistoryItem(
setHistory,
{ type: 'user', text: trimmedQuery },
userMessageTimestamp,
);
const maybeCommand = trimmedQuery.split(/\s+/)[0];
if (allowlistedCommands.includes(maybeCommand)) {
exec(trimmedQuery, (error, stdout) => {
const timestamp = getNextMessageId(userMessageTimestamp);
// TODO: handle stderr, error
addHistoryItem(
setHistory,
{ type: 'info', text: stdout },
timestamp,
);
});
return;
}
} else if (
// HACK to detect errored function responses.
typeof query === 'object' &&
query !== null &&
!Array.isArray(query) && // Ensure it's a single Part object
'functionResponse' in query && // Check if it's a function response Part
query.functionResponse?.response && // Check if response object exists
'error' in query.functionResponse.response // Check specifically for the 'error' key
) {
const history = chat.getHistory();
history.push({ role: 'user', parts: [query] });
try {
// Use getFunctionDeclarations for startChat
const toolSchemas = toolRegistry.getFunctionDeclarations();
chatSessionRef.current = await client.startChat(toolSchemas);
} catch (err: unknown) {
setInitError(`Failed to start chat: ${getErrorMessage(err)}`);
setStreamingState(StreamingState.Idle);
return;
}
}
// Prepare for streaming
setStreamingState(StreamingState.Responding);
setInitError(null);
messageIdCounterRef.current = 0; // Reset counter for new submission
const chat = chatSessionRef.current;
let currentToolGroupId: number | null = null;
// For function responses, we don't need to add a user message
if (typeof query === 'string') {
// Only add user message for string queries, not for function responses
addHistoryItem(
setHistory,
{ type: 'user', text: query },
userMessageTimestamp,
);
}
try {
abortControllerRef.current = new AbortController();
const signal = abortControllerRef.current.signal;
// --- Delegate to Stream Processor ---
// Get ServerTool descriptions for the server call
const serverTools: ServerTool[] = toolRegistry
.getAllTools()
.map((cliTool: Tool) => ({
name: cliTool.name,
schema: cliTool.schema,
execute: (args: Record<string, unknown>) =>
cliTool.execute(args as ToolArgs), // Pass execution
}));
const stream = client.sendMessageStream(chat, query, signal);
const addHistoryItemFromStream = (
itemData: Omit<HistoryItem, 'id'>,
id: number,
) => {
addHistoryItem(setHistory, itemData, id);
};
const getStreamMessageId = () => getNextMessageId(userMessageTimestamp);
// Call the renamed processor function
await processGeminiStream({
stream,
const stream = client.sendMessageStream(
chat,
query,
serverTools,
signal,
setHistory,
submitQuery,
getNextMessageId: getStreamMessageId,
addHistoryItem: addHistoryItemFromStream,
currentToolGroupIdRef,
});
);
// Process the stream events from the server logic
let currentGeminiText = ''; // To accumulate message content
let hasInitialGeminiResponse = false;
for await (const event of stream) {
if (signal.aborted) break;
if (event.type === ServerGeminiEventType.Content) {
// For content events, accumulate the text and update an existing message or create a new one
currentGeminiText += event.value;
if (!hasInitialGeminiResponse) {
// Create a new Gemini message if this is the first content event
hasInitialGeminiResponse = true;
const eventTimestamp = getNextMessageId(userMessageTimestamp);
currentGeminiMessageIdRef.current = eventTimestamp;
addHistoryItem(
setHistory,
{ type: 'gemini', text: currentGeminiText },
eventTimestamp,
);
} else if (currentGeminiMessageIdRef.current !== null) {
// Update the existing message with accumulated content
updateGeminiMessage(
currentGeminiMessageIdRef.current,
currentGeminiText,
);
}
} else if (event.type === ServerGeminiEventType.ToolCallRequest) {
// Reset the Gemini message tracking for the next response
currentGeminiText = '';
hasInitialGeminiResponse = false;
currentGeminiMessageIdRef.current = null;
const { callId, name, args } = event.value;
const cliTool = toolRegistry.getTool(name); // Get the full CLI tool
if (!cliTool) {
console.error(`CLI Tool "${name}" not found!`);
continue;
}
if (currentToolGroupId === null) {
currentToolGroupId = getNextMessageId(userMessageTimestamp);
// Add explicit cast to Omit<HistoryItem, 'id'>
addHistoryItem(
setHistory,
{ type: 'tool_group', tools: [] } as Omit<HistoryItem, 'id'>,
currentToolGroupId,
);
}
// Create the UI display object matching IndividualToolCallDisplay
const toolCallDisplay: IndividualToolCallDisplay = {
callId,
name,
description: cliTool.getDescription(args as ToolArgs),
status: ToolCallStatus.Pending,
resultDisplay: undefined,
confirmationDetails: undefined,
};
// Add pending tool call to the UI history group
setHistory((prevHistory) =>
prevHistory.map((item) => {
if (
item.id === currentToolGroupId &&
item.type === 'tool_group'
) {
// Ensure item.tools exists and is an array before spreading
const currentTools = Array.isArray(item.tools)
? item.tools
: [];
return {
...item,
tools: [...currentTools, toolCallDisplay], // Add the complete display object
};
}
return item;
}),
);
// --- Tool Execution & Confirmation Logic ---
const confirmationDetails = await cliTool.shouldConfirmExecute(
args as ToolArgs,
);
if (confirmationDetails) {
setHistory((prevHistory) =>
prevHistory.map((item) => {
if (
item.id === currentToolGroupId &&
item.type === 'tool_group'
) {
return {
...item,
tools: item.tools.map((tool) =>
tool.callId === callId
? {
...tool,
status: ToolCallStatus.Confirming,
confirmationDetails,
}
: tool,
),
};
}
return item;
}),
);
setStreamingState(StreamingState.WaitingForConfirmation);
return;
}
try {
setHistory((prevHistory) =>
prevHistory.map((item) => {
if (
item.id === currentToolGroupId &&
item.type === 'tool_group'
) {
return {
...item,
tools: item.tools.map((tool) =>
tool.callId === callId
? { ...tool, status: ToolCallStatus.Invoked }
: tool,
),
};
}
return item;
}),
);
const result: ToolResult = await cliTool.execute(
args as ToolArgs,
);
const resultPart = {
functionResponse: {
name,
id: callId,
response: { output: result.llmContent },
},
};
setHistory((prevHistory) =>
prevHistory.map((item) => {
if (
item.id === currentToolGroupId &&
item.type === 'tool_group'
) {
return {
...item,
tools: item.tools.map((tool) =>
tool.callId === callId
? {
...tool,
status: ToolCallStatus.Success,
resultDisplay: result.returnDisplay,
}
: tool,
),
};
}
return item;
}),
);
// Execute the function and continue the stream
await submitQuery(resultPart);
return;
} catch (execError: unknown) {
const error = new Error(
`Tool execution failed: ${execError instanceof Error ? execError.message : String(execError)}`,
);
const errorPart = {
functionResponse: {
name,
id: callId,
response: {
error: `Tool execution failed: ${error.message}`,
},
},
};
setHistory((prevHistory) =>
prevHistory.map((item) => {
if (
item.id === currentToolGroupId &&
item.type === 'tool_group'
) {
return {
...item,
tools: item.tools.map((tool) =>
tool.callId === callId
? {
...tool,
status: ToolCallStatus.Error,
resultDisplay: `Error: ${error.message}`,
}
: tool,
),
};
}
return item;
}),
);
await submitQuery(errorPart);
return;
}
}
}
} catch (error: unknown) {
// (Error handling for stream initiation remains the same)
console.error('Error initiating stream:', error);
if (!isNodeError(error) || error.name !== 'AbortError') {
// Use historyUpdater's function potentially? Or keep addHistoryItem here?
// Keeping addHistoryItem here for direct errors from this scope.
console.error('Error processing stream or executing tool:', error);
addHistoryItem(
setHistory,
{
type: 'error',
text: `[Error starting stream: ${getErrorMessage(error)}]`,
text: `[Error: ${getErrorMessage(error)}]`,
},
getNextMessageId(userMessageTimestamp),
);
}
} finally {
abortControllerRef.current = null;
setStreamingState(StreamingState.Idle);
// Only set to Idle if not waiting for confirmation
if (streamingState !== StreamingState.WaitingForConfirmation) {
setStreamingState(StreamingState.Idle);
}
}
},
[setStreamingState, setHistory, initError, getNextMessageId],
// Dependencies need careful review - including updateGeminiMessage
[
streamingState,
setHistory,
apiKey,
model,
getNextMessageId,
updateGeminiMessage,
],
);
return { streamingState, submitQuery, initError };
};
// Define ServerTool interface here if not importing from server (circular dep issue?)
/**
 * Minimal contract for a server-side tool as seen by the CLI: a name, the
 * function declaration advertised to the model, and an async execute entry
 * point taking already-parsed arguments.
 */
interface ServerTool {
  // Tool identifier, matched against function-call names.
  name: string;
  // Declaration (parameters, description) exposed to the model.
  schema: FunctionDeclaration;
  // Runs the tool; resolves with the tool's result payload.
  execute(params: Record<string, unknown>): Promise<ToolResult>;
}
// Define a more specific type for tool arguments to replace 'any'
// NOTE(review): arguments arrive as a JSON object keyed by parameter name;
// presumably each tool validates its own values — verify against tool impls.
type ToolArgs = Record<string, unknown>;
+5 -4
View File
@@ -7,10 +7,11 @@
import { ToolResultDisplay } from '../tools/tools.js';
export enum ToolCallStatus {
Pending,
Invoked,
Confirming,
Canceled,
Pending = 'Pending',
Invoked = 'Invoked',
Confirming = 'Confirming',
Success = 'Success',
Error = 'Error',
}
export interface ToolCallEvent {
@@ -4,16 +4,27 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { Content, SchemaUnion, Type } from '@google/genai';
import {
Config,
getErrorMessage,
isNodeError,
GeminiClient,
} from '@gemini-code/server';
import { promises as fs } from 'fs';
import { Content, SchemaUnion, Type } from '@google/genai'; // Assuming these types exist
import { GeminiClient } from '../core/gemini-client.js'; // Assuming this path
import { exec } from 'child_process'; // Needed for Windows process check
import { promisify } from 'util'; // To promisify exec
import { globalConfig } from '../config/config.js';
import { getErrorMessage, isNodeError } from './errors.js';
import { exec as _exec } from 'child_process';
import { promisify } from 'util';
// Define the AnalysisStatus type alias
type AnalysisStatus =
| 'Running'
| 'SuccessReported'
| 'ErrorReported'
| 'Unknown'
| 'AnalysisFailed';
// Promisify child_process.exec for easier async/await usage
const execAsync = promisify(exec);
const execAsync = promisify(_exec);
// Define the expected interface for the AI client dependency
export interface AiClient {
@@ -49,27 +60,39 @@ function isAnalysisFailure(
// Represents the final outcome after polling is complete (or failed/timed out)
export interface FinalAnalysisOutcome {
status: string; // e.g., 'SuccessReported', 'ErrorReported', 'ProcessEnded_SuccessReported', 'TimedOut_Running', 'AnalysisFailed'
status: string; // e.g., 'Completed_SuccessReported', 'TimedOut_Running', 'AnalysisFailed'
summary: string; // Final summary or error message
}
export class BackgroundTerminalAnalyzer {
private ai: AiClient;
// Make polling parameters configurable via constructor
private geminiClient: GeminiClient | null = null;
private readonly maxOutputAnalysisLength = 20000;
private pollIntervalMs: number;
private maxAttempts: number;
private initialDelayMs: number;
// --- Dependency Injection & Configuration ---
constructor(
aiClient?: AiClient, // Allow injecting AiClient, default to GeminiClient
config: Config, // Accept Config object
options: {
pollIntervalMs?: number;
maxAttempts?: number;
initialDelayMs?: number;
} = {}, // Provide default options
} = {},
) {
this.ai = aiClient || new GeminiClient(globalConfig); // Call constructor without model
try {
// Initialize Gemini client using config
this.geminiClient = new GeminiClient(
config.getApiKey(),
config.getModel(),
);
} catch (error) {
console.error(
'Failed to initialize GeminiClient in BackgroundTerminalAnalyzer:',
error,
);
// Set client to null so analyzeOutput handles it
this.geminiClient = null;
}
this.pollIntervalMs = options.pollIntervalMs ?? 5000; // Default 5 seconds
this.maxAttempts = options.maxAttempts ?? 6; // Default 6 attempts (approx 30s total)
this.initialDelayMs = options.initialDelayMs ?? 500; // Default 0.5s initial delay
@@ -90,6 +113,17 @@ export class BackgroundTerminalAnalyzer {
tempStderrFilePath: string,
command: string,
): Promise<FinalAnalysisOutcome> {
// --- Validate PID ---
if (typeof pid !== 'number' || !Number.isInteger(pid) || pid <= 0) {
console.error(
`BackgroundTerminalAnalyzer: Invalid or non-numeric PID provided (${pid}). Analysis cannot proceed.`,
);
return {
status: 'AnalysisFailed',
summary: 'Invalid PID provided for analysis.',
};
}
// --- Initial Delay ---
// Wait briefly before the first check to allow the process to initialize
// and potentially write initial output.
@@ -142,21 +176,22 @@ export class BackgroundTerminalAnalyzer {
/* ignore */
}
lastAnalysisResult = await this.analyzeOutputWithLLM(
lastAnalysisResult = await this.performLlmAnalysis(
currentStdout,
currentStderr,
command,
pid,
);
if (isAnalysisFailure(lastAnalysisResult)) {
return {
status: 'ProcessEnded_AnalysisFailed',
status: 'Completed_AnalysisFailed',
summary: `Process ended. Final analysis failed: ${lastAnalysisResult.error}`,
};
}
// Append ProcessEnded to the status determined by the final analysis
return {
status: 'ProcessEnded_' + lastAnalysisResult.inferredStatus,
status: 'Completed_' + lastAnalysisResult.inferredStatus,
summary: `Process ended. Final analysis summary: ${lastAnalysisResult.summary}`,
};
}
@@ -170,10 +205,11 @@ export class BackgroundTerminalAnalyzer {
}
// --- LLM Analysis ---
lastAnalysisResult = await this.analyzeOutputWithLLM(
lastAnalysisResult = await this.performLlmAnalysis(
currentStdout,
currentStderr,
command,
pid,
);
if (isAnalysisFailure(lastAnalysisResult)) {
@@ -293,31 +329,31 @@ export class BackgroundTerminalAnalyzer {
}
// --- LLM Analysis Method (largely unchanged but added validation robustness) ---
private async analyzeOutputWithLLM(
stdout: string,
stderr: string,
private async performLlmAnalysis(
stdoutContent: string,
stderrContent: string,
command: string,
pid: number,
): Promise<AnalysisResult | AnalysisFailure> {
try {
const schema: SchemaUnion = {
/* ... schema definition remains the same ... */ type: Type.OBJECT,
properties: {
summary: {
type: Type.STRING,
description:
"A concise interpretation of significant events, progress, final results, or errors found in the process's stdout and stderr. Summarizes what the logs indicate happened. Should be formatted as markdown.",
},
inferredStatus: {
type: Type.STRING,
description:
"The inferred status based *only* on analyzing the provided log content. Possible values: 'Running' (logs show ongoing activity without completion/error), 'SuccessReported' (logs indicate successful completion or final positive result), 'ErrorReported' (logs indicate an error or failure), 'Unknown' (status cannot be clearly determined from the log content).",
enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown'],
},
},
required: ['summary', 'inferredStatus'],
if (!this.geminiClient) {
return {
error: '[Analysis unavailable: Gemini client not initialized]',
inferredStatus: 'AnalysisFailed',
};
}
const prompt = `**Analyze Background Process Logs**
const truncatedStdout =
stdoutContent.substring(0, this.maxOutputAnalysisLength) +
(stdoutContent.length > this.maxOutputAnalysisLength
? '... [truncated]'
: '');
const truncatedStderr =
stderrContent.substring(0, this.maxOutputAnalysisLength) +
(stderrContent.length > this.maxOutputAnalysisLength
? '... [truncated]'
: '');
const analysisPrompt = `**Analyze Background Process Logs**
**Context:** A command (\`${command}\`) was executed in the background. You are analyzing the standard output (stdout) and standard error (stderr) collected so far to understand its progress and outcome. This analysis will be used to inform a user about what the command did.
@@ -325,11 +361,11 @@ export class BackgroundTerminalAnalyzer {
* **Command:** \`${command}\`
* **Stdout:**
\`\`\`
${stdout.slice(-2000) || '(empty)'} ${stdout.length > 2000 ? '\n... (truncated)' : ''}
${truncatedStdout}
\`\`\`
* **Stderr:**
\`\`\`
${stderr.slice(-2000) || '(empty)'} ${stderr.length > 2000 ? '\n... (truncated)' : ''}
${truncatedStderr}
\`\`\`
**Task:**
@@ -354,12 +390,14 @@ Based *only* on the provided stdout and stderr:
properties: {
summary: {
type: 'string',
description: 'Concise markdown summary of log interpretation.',
description:
'Concise markdown summary (1-3 sentences) of log interpretation.',
},
inferredStatus: {
type: 'string',
enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown'],
description: 'Status inferred *only* from log content.',
description:
'Status inferred from logs: Running, SuccessReported, ErrorReported, Unknown',
},
},
required: ['summary', 'inferredStatus'],
@@ -373,57 +411,63 @@ Based *only* on the provided stdout and stderr:
* The \`summary\` must be an interpretation of the logs, focusing on key outcomes or activities. Prioritize recent events if logs are extensive.
* The \`inferredStatus\` should reflect the most likely state *deduced purely from the log text provided*. Ensure it is one of the specified enum values.`;
const response = await this.ai.generateJson(
[{ role: 'user', parts: [{ text: prompt }] }],
const schema: SchemaUnion = {
type: Type.OBJECT,
properties: {
summary: {
type: Type.STRING,
description:
'Concise markdown summary (1-3 sentences) of log interpretation.',
},
inferredStatus: {
type: Type.STRING,
description:
'Status inferred from logs: Running, SuccessReported, ErrorReported, Unknown',
enum: ['Running', 'SuccessReported', 'ErrorReported', 'Unknown'],
},
},
required: ['summary', 'inferredStatus'],
};
try {
const resultJson = await this.geminiClient.generateJson(
[{ role: 'user', parts: [{ text: analysisPrompt }] }],
schema,
);
// --- Enhanced Validation ---
if (typeof response !== 'object' || response === null) {
throw new Error(
`LLM returned non-object response: ${JSON.stringify(response)}`,
);
}
if (
typeof response.summary !== 'string' ||
response.summary.trim() === ''
) {
// Ensure summary is a non-empty string
console.warn(
"LLM response validation warning: 'summary' field is missing, empty or not a string. Raw response:",
response,
);
// Decide how to handle: throw error, or assign default? Let's throw for now.
throw new Error(
`LLM response missing or invalid 'summary'. Got: ${JSON.stringify(response.summary)}`,
);
}
if (
!['Running', 'SuccessReported', 'ErrorReported', 'Unknown'].includes(
response.inferredStatus,
)
) {
console.warn(
`LLM response validation warning: 'inferredStatus' is invalid ('${response.inferredStatus}'). Raw response:`,
response,
);
// Decide how to handle: throw error, or default to 'Unknown'? Let's throw.
throw new Error(
`LLM returned invalid 'inferredStatus': ${JSON.stringify(response.inferredStatus)}`,
);
}
// Validate and construct the AnalysisResult object
const summary =
typeof resultJson?.summary === 'string'
? resultJson.summary
: '[Summary unavailable]';
return response as AnalysisResult; // Cast after validation
// Define valid statuses using the AnalysisStatus type (ensure it's defined above)
const validStatuses: Array<Exclude<AnalysisStatus, 'AnalysisFailed'>> = [
'Running',
'SuccessReported',
'ErrorReported',
'Unknown',
];
// Cast the unknown value to string before checking with includes
const statusString = resultJson?.inferredStatus as string;
const inferredStatus = validStatuses.includes(
statusString as Exclude<AnalysisStatus, 'AnalysisFailed'>,
)
? (statusString as Exclude<AnalysisStatus, 'AnalysisFailed'>)
: 'Unknown';
// Explicitly construct the object matching AnalysisResult type
const analysisResult: AnalysisResult = { summary, inferredStatus };
return analysisResult;
} catch (error: unknown) {
console.error(
`LLM analysis call failed for command "${command}":`,
error,
);
// Ensure the error message passed back is helpful
return {
error: `LLM analysis call encountered an error: ${getErrorMessage(error)}`,
inferredStatus: 'AnalysisFailed',
console.error(`LLM Analysis Request Failed for PID ${pid}:`, error);
// Return the AnalysisFailure type
const analysisFailure: AnalysisFailure = {
error: `[Analysis failed: ${getErrorMessage(error)}]`,
inferredStatus: 'AnalysisFailed', // This matches the AnalysisStatus type
};
return analysisFailure;
}
}
}
-24
View File
@@ -1,24 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * Type guard for Node.js system errors — `Error` instances that carry a
 * `code` property (e.g. 'ENOENT', 'EACCES').
 */
export function isNodeError(error: unknown): error is NodeJS.ErrnoException {
  if (!(error instanceof Error)) {
    return false;
  }
  return 'code' in error;
}
/**
 * Extracts a human-readable message from an unknown thrown value.
 *
 * @param error Any value caught in a `catch` clause.
 * @returns The `Error` message, a stringified form of the value, or a
 *   fallback string if the value cannot be converted at all.
 */
export function getErrorMessage(error: unknown): string {
  if (error instanceof Error) {
    return error.message;
  }
  try {
    // Non-Error values (numbers, objects, symbols, …) are coerced for logging.
    return String(error);
  } catch {
    // String() itself can throw (e.g. a hostile toString); degrade gracefully.
    return 'Failed to get error details';
  }
}
@@ -1,389 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'fs/promises';
import * as path from 'path';
import { getErrorMessage, isNodeError } from './errors.js';
const MAX_ITEMS = 200;
const TRUNCATION_INDICATOR = '...';
const DEFAULT_IGNORED_FOLDERS = new Set(['node_modules', '.git', 'dist']);
// --- Interfaces ---
/** Options for customizing folder structure retrieval. */
interface FolderStructureOptions {
/** Maximum number of files and folders combined to display. Defaults to 200. */
maxItems?: number;
/** Set of folder names to ignore completely. Case-sensitive. */
ignoredFolders?: Set<string>;
/** Optional regex to filter included files by name. */
fileIncludePattern?: RegExp;
}
// Define a type for the merged options where fileIncludePattern remains optional
type MergedFolderStructureOptions = Required<
Omit<FolderStructureOptions, 'fileIncludePattern'>
> & {
fileIncludePattern?: RegExp;
};
/** Represents the full, unfiltered information about a folder and its contents. */
interface FullFolderInfo {
name: string;
path: string;
files: string[];
subFolders: FullFolderInfo[];
totalChildren: number; // Total files + subfolders recursively
totalFiles: number; // Total files recursively
isIgnored?: boolean; // Flag to easily identify ignored folders later
}
/** Represents the potentially truncated structure used for display. */
interface ReducedFolderNode {
name: string; // Folder name
isRoot?: boolean;
files: string[]; // File names, might end with '...'
subFolders: ReducedFolderNode[]; // Subfolders, might be truncated
hasMoreFiles?: boolean; // Indicates if files were truncated for this specific folder
hasMoreSubfolders?: boolean; // Indicates if subfolders were truncated for this specific folder
}
// --- Helper Functions ---
/**
* Recursively reads the full directory structure without truncation.
* Ignored folders are included but not recursed into.
* @param folderPath The absolute path to the folder.
* @param options Configuration options.
* @returns A promise resolving to the FullFolderInfo or null if access denied/not found.
*/
async function readFullStructure(
  folderPath: string,
  options: MergedFolderStructureOptions,
): Promise<FullFolderInfo | null> {
  const name = path.basename(folderPath);
  // Totals are computed after children are gathered, so build the node
  // without them first (hence the Omit type).
  const folderInfo: Omit<FullFolderInfo, 'totalChildren' | 'totalFiles'> = {
    name,
    path: folderPath,
    files: [],
    subFolders: [],
    isIgnored: false,
  };
  let totalChildrenCount = 0;
  let totalFileCount = 0;
  try {
    // withFileTypes gives Dirent objects so we can classify without stat calls.
    const entries = await fs.readdir(folderPath, { withFileTypes: true });
    // Process directories first
    for (const entry of entries) {
      if (entry.isDirectory()) {
        const subFolderName = entry.name;
        const subFolderPath = path.join(folderPath, subFolderName);
        // Check if the folder should be ignored
        if (options.ignoredFolders.has(subFolderName)) {
          // Add ignored folder node but don't recurse into it — it appears
          // in the tree as a stub with zero explored contents.
          const ignoredFolderInfo: FullFolderInfo = {
            name: subFolderName,
            path: subFolderPath,
            files: [],
            subFolders: [],
            totalChildren: 0, // No children explored
            totalFiles: 0, // No files explored
            isIgnored: true, // Mark as ignored
          };
          folderInfo.subFolders.push(ignoredFolderInfo);
          // Skip recursion for this folder
          continue;
        }
        // If not ignored, recurse as before
        const subFolderInfo = await readFullStructure(subFolderPath, options);
        // Keep non-empty folders OR explicitly ignored folders; empty,
        // readable, non-ignored folders are dropped from the tree.
        if (
          subFolderInfo &&
          (subFolderInfo.totalChildren > 0 ||
            subFolderInfo.files.length > 0 ||
            subFolderInfo.isIgnored)
        ) {
          folderInfo.subFolders.push(subFolderInfo);
        }
      }
    }
    // Then collect files in this folder, applying the optional name filter.
    // (No ignore check applies here: ignored folders never reach this point
    // because recursion into them is skipped above.)
    for (const entry of entries) {
      if (entry.isFile()) {
        const fileName = entry.name;
        // Include if no pattern or if pattern matches
        if (
          !options.fileIncludePattern ||
          options.fileIncludePattern.test(fileName)
        ) {
          folderInfo.files.push(fileName);
        }
      }
    }
    // Calculate totals *after* processing children.
    // Ignored folders contribute 0 to counts here because we didn't look inside.
    totalFileCount =
      folderInfo.files.length +
      folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalFiles, 0);
    // Each ignored folder still counts as one child item in the parent's count
    // (it is a member of subFolders with totalChildren = 0).
    totalChildrenCount =
      folderInfo.files.length +
      folderInfo.subFolders.length +
      folderInfo.subFolders.reduce((sum, sf) => sum + sf.totalChildren, 0);
  } catch (error: unknown) {
    // Permission / not-found errors on this directory are soft failures:
    // warn and return null so the parent can simply omit this branch.
    if (
      isNodeError(error) &&
      (error.code === 'EACCES' || error.code === 'ENOENT')
    ) {
      console.warn(
        `Warning: Could not read directory ${folderPath}: ${error.message}`,
      );
      return null;
    }
    // Anything else is unexpected — propagate.
    throw error;
  }
  return {
    ...(folderInfo as FullFolderInfo), // Cast needed after conditional assignment check
    totalChildren: totalChildrenCount,
    totalFiles: totalFileCount,
  };
}
/**
* Reduces the full folder structure based on the maxItems limit using BFS.
* Handles explicitly ignored folders by showing them with a truncation indicator.
* @param fullInfo The complete folder structure info.
* @param maxItems The maximum number of items (files + folders) to include.
* @param ignoredFolders The set of folder names that were ignored during the read phase.
* @returns The root node of the reduced structure.
*/
function reduceStructure(
  fullInfo: FullFolderInfo,
  maxItems: number,
): ReducedFolderNode {
  const rootReducedNode: ReducedFolderNode = {
    name: fullInfo.name,
    files: [],
    subFolders: [],
    isRoot: true,
  };
  // BFS over (original, reduced) pairs so shallow items win the budget
  // before deeply nested ones.
  const queue: Array<{
    original: FullFolderInfo;
    reduced: ReducedFolderNode;
  }> = [];
  // Don't count the root itself towards the limit initially
  queue.push({ original: fullInfo, reduced: rootReducedNode });
  let itemCount = 0; // Count folders + files added to the reduced structure
  while (queue.length > 0) {
    const { original: originalFolder, reduced: reducedFolder } = queue.shift()!;
    // If the folder being processed was itself marked as ignored (shouldn't happen for root)
    if (originalFolder.isIgnored) {
      continue;
    }
    // Process Files
    let fileLimitReached = false;
    for (const file of originalFolder.files) {
      // Check limit *before* adding the file
      if (itemCount >= maxItems) {
        if (!fileLimitReached) {
          // First overflow in this folder: emit one '...' marker, then stop.
          reducedFolder.files.push(TRUNCATION_INDICATOR);
          reducedFolder.hasMoreFiles = true;
          fileLimitReached = true;
        }
        break;
      }
      reducedFolder.files.push(file);
      itemCount++;
    }
    // Process Subfolders
    let subfolderLimitReached = false;
    for (const subFolder of originalFolder.subFolders) {
      // Count the folder itself towards the limit
      itemCount++;
      if (itemCount > maxItems) {
        if (!subfolderLimitReached) {
          // Add a placeholder node ONLY if we haven't already added one
          const truncatedSubfolderNode: ReducedFolderNode = {
            name: subFolder.name,
            files: [TRUNCATION_INDICATOR], // Generic truncation
            subFolders: [],
            hasMoreFiles: true,
          };
          reducedFolder.subFolders.push(truncatedSubfolderNode);
          reducedFolder.hasMoreSubfolders = true;
          subfolderLimitReached = true;
        }
        // NOTE(review): `continue` keeps iterating the remaining siblings and
        // still increments itemCount for each, even though no further nodes
        // are added — presumably intentional so the budget reflects skipped
        // folders; confirm, or `break` would be equivalent for output.
        continue; // Stop processing further subfolders for this parent
      }
      // Handle explicitly ignored folders identified during the read phase
      if (subFolder.isIgnored) {
        const ignoredReducedNode: ReducedFolderNode = {
          name: subFolder.name,
          files: [TRUNCATION_INDICATOR], // Indicate contents ignored/truncated
          subFolders: [],
          hasMoreFiles: true, // Mark as truncated
        };
        reducedFolder.subFolders.push(ignoredReducedNode);
        // DO NOT add the ignored folder to the queue for further processing
      } else {
        // If not ignored and within limit, create the reduced node and add to queue
        const reducedSubFolder: ReducedFolderNode = {
          name: subFolder.name,
          files: [],
          subFolders: [],
        };
        reducedFolder.subFolders.push(reducedSubFolder);
        queue.push({ original: subFolder, reduced: reducedSubFolder });
      }
    }
  }
  return rootReducedNode;
}
/** Calculates the total number of items present in the reduced structure. */
/**
 * Counts the items (files + folders) present in a reduced structure.
 * A lone '...' file entry counts as one item, but the interior of a
 * placeholder node (ignored/truncated folder) is never descended into.
 */
function countReducedItems(node: ReducedFolderNode): number {
  // Every file entry (including a '...' marker) and every immediate
  // subfolder contributes one item.
  let total = node.files.length + node.subFolders.length;
  for (const child of node.subFolders) {
    // A placeholder stands in for an ignored/truncated folder: a single
    // '...' file and no subfolders. Its contents were never enumerated,
    // so there is nothing further to count inside it.
    const isPlaceholder =
      child.files.length === 1 &&
      child.files[0] === TRUNCATION_INDICATOR &&
      child.subFolders.length === 0;
    if (!isPlaceholder) {
      total += countReducedItems(child);
    }
  }
  return total;
}
/**
* Formats the reduced folder structure into a tree-like string.
* (No changes needed in this function)
* @param node The current node in the reduced structure.
* @param indent The current indentation string.
* @param isLast Sibling indicator.
* @param builder Array to build the string lines.
*/
/**
 * Renders a reduced folder node (and its children) as tree-style lines.
 *
 * @param node The current node in the reduced structure.
 * @param indent Accumulated indentation for this depth.
 * @param isLast Whether this node is the last sibling at its level.
 * @param builder Output accumulator; one rendered line per entry.
 */
function formatReducedStructure(
  node: ReducedFolderNode,
  indent: string,
  isLast: boolean,
  builder: string[],
): void {
  // The root's name is printed by the caller (as the resolved path),
  // so only non-root folders get their own line.
  if (!node.isRoot) {
    const branch = isLast ? '└───' : '├───';
    builder.push(`${indent}${branch}${node.name}/`);
  }
  const nextIndent = indent + (isLast || node.isRoot ? ' ' : '│ ');
  // Files come first; the '└───' connector is reserved for the very last
  // entry of this folder (only when no subfolders follow).
  node.files.forEach((file, index) => {
    const isFinalEntry =
      index === node.files.length - 1 && node.subFolders.length === 0;
    builder.push(`${nextIndent}${isFinalEntry ? '└───' : '├───'}${file}`);
  });
  node.subFolders.forEach((sub, index) => {
    formatReducedStructure(
      sub,
      nextIndent,
      index === node.subFolders.length - 1,
      builder,
    );
  });
}
// --- Main Exported Function ---
/**
* Generates a string representation of a directory's structure,
* limiting the number of items displayed. Ignored folders are shown
* followed by '...' instead of their contents.
*
* @param directory The absolute or relative path to the directory.
* @param options Optional configuration settings.
* @returns A promise resolving to the formatted folder structure string.
*/
/**
 * Generates a string representation of a directory's structure,
 * limiting the number of items displayed. Ignored folders are shown
 * followed by '...' instead of their contents.
 *
 * @param directory The absolute or relative path to the directory.
 * @param options Optional configuration settings.
 * @returns A promise resolving to the formatted folder structure string.
 */
export async function getFolderStructure(
  directory: string,
  options?: FolderStructureOptions,
): Promise<string> {
  const resolvedPath = path.resolve(directory);
  const mergedOptions: MergedFolderStructureOptions = {
    maxItems: options?.maxItems ?? MAX_ITEMS,
    ignoredFolders: options?.ignoredFolders ?? DEFAULT_IGNORED_FOLDERS,
    fileIncludePattern: options?.fileIncludePattern,
  };
  try {
    // Read the complete tree (ignored folders are flagged, not recursed into).
    const fullInfo = await readFullStructure(resolvedPath, mergedOptions);
    if (!fullInfo) {
      return `Error: Could not read directory "${resolvedPath}". Check path and permissions.`;
    }
    // Apply the maxItems budget, then count what survived for the summary.
    const reducedRoot = reduceStructure(fullInfo, mergedOptions.maxItems);
    const reducedItemCount = countReducedItems(reducedRoot);
    // Render the surviving tree into indented lines.
    const structureLines: string[] = [];
    formatReducedStructure(reducedRoot, '', true, structureLines);
    // Normalize separators so Windows paths display consistently.
    const displayPath = resolvedPath.replace(/\\/g, '/');
    const totalOriginalChildren = fullInfo.totalChildren;
    // Mention truncation when the budget cut items OR top-level folders
    // were ignored outright.
    const wasTruncated =
      reducedItemCount < totalOriginalChildren ||
      fullInfo.subFolders.some((sub) => sub.isIgnored);
    const disclaimer = wasTruncated
      ? `Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown or were ignored.`
      : '';
    const summary =
      `Showing ${reducedItemCount} of ${totalOriginalChildren} items (files + folders). ${disclaimer}`.trim();
    return `${summary}\n\n${displayPath}/\n${structureLines.join('\n')}`;
  } catch (error: unknown) {
    console.error(`Error getting folder structure for ${resolvedPath}:`, error);
    return `Error processing directory "${resolvedPath}": ${getErrorMessage(error)}`;
  }
}
-102
View File
@@ -1,102 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import path from 'node:path'; // Import the 'path' module
/**
* Shortens a path string if it exceeds maxLen, prioritizing the start and end segments.
* Example: /path/to/a/very/long/file.txt -> /path/.../long/file.txt
*/
/**
 * Shortens a path string if it exceeds maxLen, prioritizing the start and end segments.
 * Example: /path/to/a/very/long/file.txt -> /path/.../file.txt
 *
 * @param filePath The path to shorten.
 * @param maxLen Maximum allowed length of the result (default 35).
 * @returns The original path if short enough, otherwise a middle-elided form.
 */
export function shortenPath(filePath: string, maxLen: number = 35): string {
  if (filePath.length <= maxLen) {
    return filePath;
  }

  // Fallback: keep equal-sized head and tail around a literal '...'.
  const truncateMiddle = (): string => {
    const keep = Math.floor((maxLen - 3) / 2);
    // Guard against tiny maxLen values where nothing can be kept.
    if (keep <= 0) {
      return filePath.substring(0, maxLen - 3) + '...';
    }
    return (
      filePath.substring(0, keep) +
      '...' +
      filePath.substring(filePath.length - keep)
    );
  };

  const { root } = path.parse(filePath);
  const sep = path.sep;
  // Path segments after the root, with empty pieces discarded.
  const segments = filePath
    .substring(root.length)
    .split(sep)
    .filter((segment) => segment !== '');

  // Nothing (or only one segment) after the root: no structure to exploit.
  if (segments.length <= 1) {
    return truncateMiddle();
  }

  // Always keep the root plus the first directory as the visible head.
  const head = root + segments[0];
  const tail: string[] = [];
  // Budget already spent: head + separator + '...'.
  let used = head.length + sep.length + 3;

  // Greedily pull trailing segments into the tail while they fit.
  for (let i = segments.length - 1; i >= 1; i--) {
    const withSegment = used + sep.length + segments[i].length;
    if (withSegment > maxLen) {
      break;
    }
    tail.unshift(segments[i]);
    used = withSegment;
  }

  let shortened = head + sep + '...';
  if (tail.length > 0) {
    shortened += sep + tail.join(sep);
  }
  // If even head + '...' overshoots, fall back to plain middle truncation.
  return shortened.length > maxLen ? truncateMiddle() : shortened;
}
/**
* Calculates the relative path from a root directory to a target path.
* Ensures both paths are resolved before calculating.
* Returns '.' if the target path is the same as the root directory.
*
* @param targetPath The absolute or relative path to make relative.
* @param rootDirectory The absolute path of the directory to make the target path relative to.
* @returns The relative path from rootDirectory to targetPath.
*/
/**
 * Calculates the relative path from a root directory to a target path.
 * Both inputs are resolved to absolute paths before comparison.
 *
 * @param targetPath The absolute or relative path to make relative.
 * @param rootDirectory The directory the result is relative to.
 * @returns The relative path, or '.' when the two resolve to the same place.
 */
export function makeRelative(
  targetPath: string,
  rootDirectory: string,
): string {
  const relativePath = path.relative(
    path.resolve(rootDirectory),
    path.resolve(targetPath),
  );
  // path.relative yields '' for identical paths; callers expect '.'.
  return relativePath === '' ? '.' : relativePath;
}
-59
View File
@@ -1,59 +0,0 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Simple utility to validate objects against JSON Schemas
* In a real implementation, you would use a library like Ajv
*/
export class SchemaValidator {
  /**
   * Validates data against a JSON schema.
   *
   * This is a simplified implementation; a real application should use a
   * library like Ajv for proper validation.
   *
   * @param schema JSON Schema to validate against
   * @param data Data to validate
   * @returns True if valid, false otherwise
   */
  static validate(schema: Record<string, unknown>, data: unknown): boolean {
    // Fix: null/undefined data previously threw a TypeError when the
    // required-field loop indexed into it; a validator should report a
    // verdict, not crash.
    if (data == null) {
      const required = Array.isArray(schema.required)
        ? (schema.required as string[])
        : [];
      if (required.length > 0) {
        console.error(`Missing required field: ${required[0]}`);
        return false;
      }
      // No required fields; per-property type checks are vacuous without data.
      return true;
    }

    // Check for required fields
    if (schema.required && Array.isArray(schema.required)) {
      const required = schema.required as string[];
      const dataObj = data as Record<string, unknown>;
      for (const field of required) {
        if (dataObj[field] === undefined) {
          console.error(`Missing required field: ${field}`);
          return false;
        }
      }
    }

    // Check property types if properties are defined
    if (schema.properties && typeof schema.properties === 'object') {
      const properties = schema.properties as Record<string, { type?: string }>;
      const dataObj = data as Record<string, unknown>;
      for (const [key, prop] of Object.entries(properties)) {
        // Only values that are actually present are type-checked.
        if (dataObj[key] !== undefined && prop.type) {
          const expectedType = prop.type;
          // typeof reports 'object' for arrays, so detect arrays explicitly.
          const actualType = Array.isArray(dataObj[key])
            ? 'array'
            : typeof dataObj[key];
          if (expectedType !== actualType) {
            console.error(
              `Type mismatch for property "${key}": expected ${expectedType}, got ${actualType}`,
            );
            return false;
          }
        }
      }
    }

    return true;
  }
}