Final Changes for stable release (#8105)

Co-authored-by: gemini-cli-robot <gemini-cli-robot@google.com>
Co-authored-by: Gal Zahavi <38544478+galz10@users.noreply.github.com>
Co-authored-by: christine betts <chrstn@uw.edu>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Bryan Morgan <bryanmorgan@google.com>
Co-authored-by: anthony bushong <agmsb@users.noreply.github.com>
Co-authored-by: Shreya Keshive <skeshive@gmail.com>
Co-authored-by: Taylor Mullen <ntaylormullen@google.com>
Co-authored-by: Arya Gummadi <aryagummadi@google.com>
Co-authored-by: Sandy Tao <sandytao520@icloud.com>
Co-authored-by: Pascal Birchler <pascalb@google.com>
Co-authored-by: Victor May <mayvic@google.com>
Co-authored-by: silvio junior <silviojr.dcc@gmail.com>
This commit is contained in:
matt korwel
2025-09-09 13:55:27 -07:00
committed by GitHub
parent c173f77052
commit 89213699bf
26 changed files with 443 additions and 291 deletions
+2
View File
@@ -16,7 +16,9 @@ export { logIdeConnection } from './src/telemetry/loggers.js';
export {
IdeConnectionEvent,
IdeConnectionType,
ExtensionInstallEvent,
} from './src/telemetry/types.js';
export { getIdeTrust } from './src/utils/ide-trust.js';
export { makeFakeConfig } from './src/test-utils/config.js';
export * from './src/utils/pathReader.js';
export { ClearcutLogger } from './src/telemetry/clearcut-logger/clearcut-logger.js';
+1 -1
View File
@@ -1,6 +1,6 @@
{
"name": "@google/gemini-cli-core",
"version": "0.4.0-preview",
"version": "0.4.0-preview.2",
"description": "Gemini CLI Core",
"repository": {
"type": "git",
+3 -3
View File
@@ -282,7 +282,7 @@ export class Config {
private readonly useRipgrep: boolean;
private readonly shouldUseNodePtyShell: boolean;
private readonly skipNextSpeakerCheck: boolean;
private readonly extensionManagement: boolean;
private readonly extensionManagement: boolean = true;
private readonly enablePromptCompletion: boolean = false;
private initialized: boolean = false;
readonly storage: Storage;
@@ -359,8 +359,8 @@ export class Config {
this.useRipgrep = params.useRipgrep ?? false;
this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;
this.useSmartEdit = params.useSmartEdit ?? true;
this.extensionManagement = params.extensionManagement ?? false;
this.useSmartEdit = params.useSmartEdit ?? false;
this.extensionManagement = params.extensionManagement ?? true;
this.storage = new Storage(this.targetDir);
this.enablePromptCompletion = params.enablePromptCompletion ?? false;
this.fileExclusions = new FileExclusions(this);
+94 -19
View File
@@ -416,9 +416,9 @@ describe('GeminiChat', () => {
expect(modelTurn?.parts![0]!.functionCall).toBeDefined();
});
it('should succeed if the stream ends with an empty part but has a valid finishReason', async () => {
// 1. Mock a stream that ends with an invalid part but has a 'STOP' finish reason.
const streamWithValidFinish = (async function* () {
it('should fail if the stream ends with an empty part and has no finishReason', async () => {
// 1. Mock a stream that ends with an invalid part and has no finish reason.
const streamWithNoFinish = (async function* () {
yield {
candidates: [
{
@@ -429,7 +429,7 @@ describe('GeminiChat', () => {
},
],
} as unknown as GenerateContentResponse;
// This second chunk is invalid, but the finishReason should save it from retrying.
// This second chunk is invalid and has no finishReason, so it should fail.
yield {
candidates: [
{
@@ -437,6 +437,50 @@ describe('GeminiChat', () => {
role: 'model',
parts: [{ text: '' }],
},
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
streamWithNoFinish,
);
// 2. Action & Assert: The stream should fail because there's no finish reason.
const stream = await chat.sendMessageStream(
{ message: 'test message' },
'prompt-id-no-finish-empty-end',
);
await expect(
(async () => {
for await (const _ of stream) {
/* consume stream */
}
})(),
).rejects.toThrow(EmptyStreamError);
});
it('should succeed if the stream ends with an invalid part but has a finishReason and contained a valid part', async () => {
// 1. Mock a stream that sends a valid chunk, then an invalid one, but has a finish reason.
const streamWithInvalidEnd = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ text: 'Initial valid content...' }],
},
},
],
} as unknown as GenerateContentResponse;
// This second chunk is invalid, but the response has a finishReason.
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ text: '' }], // Invalid part
},
finishReason: 'STOP',
},
],
@@ -444,14 +488,13 @@ describe('GeminiChat', () => {
})();
vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
streamWithValidFinish,
streamWithInvalidEnd,
);
// 2. Action & Assert: The stream should complete successfully because the valid
// finishReason overrides the invalid final chunk.
// 2. Action & Assert: The stream should complete without throwing an error.
const stream = await chat.sendMessageStream(
{ message: 'test message' },
'prompt-id-valid-finish-empty-end',
'prompt-id-valid-then-invalid-end',
);
await expect(
(async () => {
@@ -461,12 +504,12 @@ describe('GeminiChat', () => {
})(),
).resolves.not.toThrow();
// 3. Verify history was recorded correctly
// 3. Verify history was recorded correctly with only the valid part.
const history = chat.getHistory();
expect(history.length).toBe(2);
expect(history.length).toBe(2); // user turn + model turn
const modelTurn = history[1]!;
expect(modelTurn?.parts?.length).toBe(1); // The empty part is discarded
expect(modelTurn?.parts![0]!.text).toBe('Initial content...');
expect(modelTurn?.parts?.length).toBe(1);
expect(modelTurn?.parts![0]!.text).toBe('Initial valid content...');
});
it('should not consolidate text into a part that also contains a functionCall', async () => {
// 1. Mock the API to stream a malformed part followed by a valid text part.
@@ -542,7 +585,10 @@ describe('GeminiChat', () => {
// as the important part is consolidating what comes after.
yield {
candidates: [
{ content: { role: 'model', parts: [{ text: ' World!' }] } },
{
content: { role: 'model', parts: [{ text: ' World!' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
@@ -645,6 +691,7 @@ describe('GeminiChat', () => {
{ text: 'This is the visible text that should not be lost.' },
],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
@@ -705,7 +752,10 @@ describe('GeminiChat', () => {
const emptyStreamResponse = (async function* () {
yield {
candidates: [
{ content: { role: 'model', parts: [{ thought: true }] } },
{
content: { role: 'model', parts: [{ thought: true }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
@@ -975,7 +1025,12 @@ describe('GeminiChat', () => {
// Second attempt (the retry): A minimal valid stream.
(async function* () {
yield {
candidates: [{ content: { parts: [{ text: 'Success' }] } }],
candidates: [
{
content: { parts: [{ text: 'Success' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})(),
);
@@ -1012,7 +1067,10 @@ describe('GeminiChat', () => {
(async function* () {
yield {
candidates: [
{ content: { parts: [{ text: 'Successful response' }] } },
{
content: { parts: [{ text: 'Successful response' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})(),
@@ -1123,7 +1181,12 @@ describe('GeminiChat', () => {
// Second attempt succeeds
(async function* () {
yield {
candidates: [{ content: { parts: [{ text: 'Second answer' }] } }],
candidates: [
{
content: { parts: [{ text: 'Second answer' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})(),
);
@@ -1272,6 +1335,7 @@ describe('GeminiChat', () => {
content: {
parts: [{ text: 'Successful response after empty' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
@@ -1333,13 +1397,23 @@ describe('GeminiChat', () => {
} as unknown as GenerateContentResponse;
await firstStreamContinuePromise; // Pause the stream
yield {
candidates: [{ content: { parts: [{ text: ' part 2' }] } }],
candidates: [
{
content: { parts: [{ text: ' part 2' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
const secondStreamGenerator = (async function* () {
yield {
candidates: [{ content: { parts: [{ text: 'second response' }] } }],
candidates: [
{
content: { parts: [{ text: 'second response' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
@@ -1424,6 +1498,7 @@ describe('GeminiChat', () => {
content: {
parts: [{ text: 'Successful final response' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
+25 -31
View File
@@ -612,23 +612,18 @@ export class GeminiChat {
): AsyncGenerator<GenerateContentResponse> {
const modelResponseParts: Part[] = [];
let hasReceivedAnyChunk = false;
let hasReceivedValidChunk = false;
let hasToolCall = false;
let lastChunk: GenerateContentResponse | null = null;
let isStreamInvalid = false;
let firstInvalidChunkEncountered = false;
let validChunkAfterInvalidEncountered = false;
let lastChunkIsInvalid = false;
for await (const chunk of streamResponse) {
hasReceivedAnyChunk = true;
lastChunk = chunk;
if (isValidResponse(chunk)) {
if (firstInvalidChunkEncountered) {
// A valid chunk appeared *after* an invalid one.
validChunkAfterInvalidEncountered = true;
}
hasReceivedValidChunk = true;
lastChunkIsInvalid = false;
const content = chunk.candidates?.[0]?.content;
if (content?.parts) {
if (content.parts.some((part) => part.thought)) {
@@ -640,14 +635,16 @@ export class GeminiChat {
}
// Always add parts - thoughts will be filtered out later in recordHistory
modelResponseParts.push(...content.parts);
if (content.parts.some((part) => part.functionCall)) {
hasToolCall = true;
}
}
} else {
logInvalidChunk(
this.config,
new InvalidChunkEvent('Invalid chunk received from stream.'),
);
isStreamInvalid = true;
firstInvalidChunkEncountered = true;
lastChunkIsInvalid = true;
}
// Record token usage if this chunk has usageMetadata
@@ -662,27 +659,24 @@ export class GeminiChat {
throw new EmptyStreamError('Model stream completed without any chunks.');
}
// --- FIX: The entire validation block was restructured for clarity and correctness ---
// Only apply complex validation if an invalid chunk was actually found.
if (isStreamInvalid) {
// Fail immediately if an invalid chunk was not the absolute last chunk.
if (validChunkAfterInvalidEncountered) {
throw new EmptyStreamError(
'Model stream had invalid intermediate chunks without a tool call.',
);
}
const hasFinishReason = lastChunk?.candidates?.some(
(candidate) => candidate.finishReason,
);
if (!hasToolCall) {
// If the *only* invalid part was the last chunk, we still check its finish reason.
const finishReason = lastChunk?.candidates?.[0]?.finishReason;
const isSuccessfulFinish =
finishReason === 'STOP' || finishReason === 'MAX_TOKENS';
if (!isSuccessfulFinish) {
throw new EmptyStreamError(
'Model stream ended with an invalid chunk and a failed finish reason.',
);
}
}
// Stream validation logic: A stream is considered successful if:
// 1. There's a tool call (tool calls can end without explicit finish reasons), OR
// 2. There's a finish reason AND the last chunk is valid (or we haven't received any valid chunks)
//
// We throw an error only when there's no tool call AND:
// - No finish reason, OR
// - Last chunk is invalid after receiving valid content
if (
!hasToolCall &&
(!hasFinishReason || (lastChunkIsInvalid && !hasReceivedValidChunk))
) {
throw new EmptyStreamError(
'Model stream ended with an invalid chunk or missing finish reason.',
);
}
// Record model response text from the collected parts
+38 -34
View File
@@ -24,39 +24,44 @@ async function getProcessInfo(pid: number): Promise<{
name: string;
command: string;
}> {
const platform = os.platform();
if (platform === 'win32') {
const powershellCommand = [
'$p = Get-CimInstance Win32_Process',
`-Filter 'ProcessId=${pid}'`,
'-ErrorAction SilentlyContinue;',
'if ($p) {',
'@{Name=$p.Name;ParentProcessId=$p.ParentProcessId;CommandLine=$p.CommandLine}',
'| ConvertTo-Json',
'}',
].join(' ');
const { stdout } = await execAsync(`powershell "${powershellCommand}"`);
const output = stdout.trim();
if (!output) return { parentPid: 0, name: '', command: '' };
const {
Name = '',
ParentProcessId = 0,
CommandLine = '',
} = JSON.parse(output);
return { parentPid: ParentProcessId, name: Name, command: CommandLine };
} else {
const command = `ps -o ppid=,command= -p ${pid}`;
const { stdout } = await execAsync(command);
const trimmedStdout = stdout.trim();
const ppidString = trimmedStdout.split(/\s+/)[0];
const parentPid = parseInt(ppidString, 10);
const fullCommand = trimmedStdout.substring(ppidString.length).trim();
const processName = path.basename(fullCommand.split(' ')[0]);
return {
parentPid: isNaN(parentPid) ? 1 : parentPid,
name: processName,
command: fullCommand,
};
try {
const platform = os.platform();
if (platform === 'win32') {
const powershellCommand = [
'$p = Get-CimInstance Win32_Process',
`-Filter 'ProcessId=${pid}'`,
'-ErrorAction SilentlyContinue;',
'if ($p) {',
'@{Name=$p.Name;ParentProcessId=$p.ParentProcessId;CommandLine=$p.CommandLine}',
'| ConvertTo-Json',
'}',
].join(' ');
const { stdout } = await execAsync(`powershell "${powershellCommand}"`);
const output = stdout.trim();
if (!output) return { parentPid: 0, name: '', command: '' };
const {
Name = '',
ParentProcessId = 0,
CommandLine = '',
} = JSON.parse(output);
return { parentPid: ParentProcessId, name: Name, command: CommandLine };
} else {
const command = `ps -o ppid=,command= -p ${pid}`;
const { stdout } = await execAsync(command);
const trimmedStdout = stdout.trim();
const ppidString = trimmedStdout.split(/\s+/)[0];
const parentPid = parseInt(ppidString, 10);
const fullCommand = trimmedStdout.substring(ppidString.length).trim();
const processName = path.basename(fullCommand.split(' ')[0]);
return {
parentPid: isNaN(parentPid) ? 1 : parentPid,
name: processName,
command: fullCommand,
};
}
} catch (_e) {
console.debug(`Failed to get process info for pid ${pid}:`, _e);
return { parentPid: 0, name: '', command: '' };
}
}
@@ -169,7 +174,6 @@ async function getIdeProcessInfoForWindows(): Promise<{
* top-level ancestor process ID and command as a fallback.
*
* @returns A promise that resolves to the PID and command of the IDE process.
* @throws Will throw an error if the underlying shell commands fail.
*/
export async function getIdeProcessInfo(): Promise<{
pid: number;
@@ -209,7 +209,7 @@ describe('ClearcutLogger', () => {
const cli_version = CLI_VERSION;
const git_commit_hash = GIT_COMMIT_INFO;
const prompt_id = 'my-prompt-123';
const user_settings = safeJsonStringify([{ smart_edit_enabled: true }]);
const user_settings = safeJsonStringify([{ smart_edit_enabled: false }]);
// Setup logger with expected values
const { logger, loggerConfig } = setup({
@@ -24,6 +24,7 @@ import type {
InvalidChunkEvent,
ContentRetryEvent,
ContentRetryFailureEvent,
ExtensionInstallEvent,
} from '../types.js';
import { EventMetadataKey } from './event-metadata-key.js';
import type { Config } from '../../config/config.js';
@@ -55,6 +56,7 @@ export enum EventNames {
INVALID_CHUNK = 'invalid_chunk',
CONTENT_RETRY = 'content_retry',
CONTENT_RETRY_FAILURE = 'content_retry_failure',
EXTENSION_INSTALL = 'extension_install',
}
export interface LogResponse {
@@ -825,6 +827,32 @@ export class ClearcutLogger {
this.flushIfNeeded();
}
logExtensionInstallEvent(event: ExtensionInstallEvent): void {
const data: EventValue[] = [
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_EXTENSION_NAME,
value: event.extension_name,
},
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_EXTENSION_VERSION,
value: event.extension_version,
},
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_EXTENSION_SOURCE,
value: event.extension_source,
},
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_EXTENSION_INSTALL_STATUS,
value: event.status,
},
];
this.enqueueLogEvent(
this.createLogEvent(EventNames.EXTENSION_INSTALL, data),
);
this.flushIfNeeded();
}
/**
* Adds default fields to data, and returns a new data array. This fields
* should exist on all log events.
@@ -331,4 +331,20 @@ export enum EventMetadataKey {
// Logs the current nodejs version
GEMINI_CLI_NODE_VERSION = 83,
// ==========================================================================
// Extension Install Event Keys
// ===========================================================================
// Logs the name of the extension.
GEMINI_CLI_EXTENSION_NAME = 85,
// Logs the version of the extension.
GEMINI_CLI_EXTENSION_VERSION = 86,
// Logs the source of the extension.
GEMINI_CLI_EXTENSION_SOURCE = 87,
// Logs the status of the extension install.
GEMINI_CLI_EXTENSION_INSTALL_STATUS = 88,
}
+25 -1
View File
@@ -517,4 +517,28 @@ export type TelemetryEvent =
| FileOperationEvent
| InvalidChunkEvent
| ContentRetryEvent
| ContentRetryFailureEvent;
| ContentRetryFailureEvent
| ExtensionInstallEvent;
/**
 * Telemetry event emitted when an extension install attempt finishes,
 * recording which extension was installed, where it came from, and whether
 * the install succeeded.
 */
export class ExtensionInstallEvent implements BaseTelemetryEvent {
  // Fixed event identity plus a creation timestamp, both set when the
  // event object is constructed.
  'event.name': 'extension_install' = 'extension_install';
  'event.timestamp': string = new Date().toISOString();

  constructor(
    public extension_name: string,
    public extension_version: string,
    public extension_source: string,
    public status: 'success' | 'error',
  ) {}
}