Mirror of https://github.com/google-gemini/gemini-cli.git, synced 2026-03-11 22:51:00 -07:00.
fix(core): set temperature to 1 on retry in sendMessageStream (#10866)
This commit is contained in:
@@ -864,6 +864,72 @@ describe('GeminiChat', () => {
|
||||
expect(uiTelemetryService.setLastPromptTokenCount).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should set temperature to 1 on retry', async () => {
|
||||
// Use mockImplementationOnce to provide a fresh, promise-wrapped generator for each attempt.
|
||||
vi.mocked(mockContentGenerator.generateContentStream)
|
||||
.mockImplementationOnce(async () =>
|
||||
// First call returns an invalid stream
|
||||
(async function* () {
|
||||
yield {
|
||||
candidates: [{ content: { parts: [{ text: '' }] } }], // Invalid empty text part
|
||||
} as unknown as GenerateContentResponse;
|
||||
})(),
|
||||
)
|
||||
.mockImplementationOnce(async () =>
|
||||
// Second call returns a valid stream
|
||||
(async function* () {
|
||||
yield {
|
||||
candidates: [
|
||||
{
|
||||
content: { parts: [{ text: 'Successful response' }] },
|
||||
finishReason: 'STOP',
|
||||
},
|
||||
],
|
||||
} as unknown as GenerateContentResponse;
|
||||
})(),
|
||||
);
|
||||
|
||||
const stream = await chat.sendMessageStream(
|
||||
'test-model',
|
||||
{ message: 'test', config: { temperature: 0.5 } },
|
||||
'prompt-id-retry-temperature',
|
||||
);
|
||||
|
||||
for await (const _ of stream) {
|
||||
// consume stream
|
||||
}
|
||||
|
||||
expect(mockContentGenerator.generateContentStream).toHaveBeenCalledTimes(
|
||||
2,
|
||||
);
|
||||
|
||||
// First call should have original temperature
|
||||
expect(
|
||||
mockContentGenerator.generateContentStream,
|
||||
).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
expect.objectContaining({
|
||||
config: expect.objectContaining({
|
||||
temperature: 0.5,
|
||||
}),
|
||||
}),
|
||||
'prompt-id-retry-temperature',
|
||||
);
|
||||
|
||||
// Second call (retry) should have temperature 1
|
||||
expect(
|
||||
mockContentGenerator.generateContentStream,
|
||||
).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
expect.objectContaining({
|
||||
config: expect.objectContaining({
|
||||
temperature: 1,
|
||||
}),
|
||||
}),
|
||||
'prompt-id-retry-temperature',
|
||||
);
|
||||
});
|
||||
|
||||
it('should fail after all retries on persistent invalid content and report metrics', async () => {
|
||||
vi.mocked(mockContentGenerator.generateContentStream).mockImplementation(
|
||||
async () =>
|
||||
|
||||
@@ -273,10 +273,19 @@ export class GeminiChat {
|
||||
yield { type: StreamEventType.RETRY };
|
||||
}
|
||||
|
||||
// If this is a retry, set temperature to 1 to encourage different output.
|
||||
const currentParams = { ...params };
|
||||
if (attempt > 0) {
|
||||
currentParams.config = {
|
||||
...currentParams.config,
|
||||
temperature: 1,
|
||||
};
|
||||
}
|
||||
|
||||
const stream = await self.makeApiCallAndProcessStream(
|
||||
model,
|
||||
requestContents,
|
||||
params,
|
||||
currentParams,
|
||||
prompt_id,
|
||||
);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user