mirror of
https://github.com/google-gemini/gemini-cli.git
synced 2026-04-28 14:04:41 -07:00
Fix(chat): Finalize next speaker detection logic
- Enhance `checkNextSpeaker` to handle cases where the last message is a function response or an empty model message.
- If the last message is a function response, the model should speak next.
- If the last message is an empty model message, the model should speak next.
- This ensures more robust and accurate determination of the next speaker in the conversation, completing the fix for the issue.
- Updated tests.

Fixes https://github.com/google-gemini/gemini-cli/issues/551
This commit is contained in:
committed by
N. Taylor Mullen
parent
c92d4edb89
commit
9e1cfca53f
@@ -7,6 +7,7 @@
|
||||
import { Content, SchemaUnion, Type } from '@google/genai';
|
||||
import { GeminiClient } from '../core/client.js';
|
||||
import { GeminiChat } from '../core/geminiChat.js';
|
||||
import { isFunctionResponse } from './messageInspectors.js';
|
||||
|
||||
const CHECK_PROMPT = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
|
||||
**Decision Rules (apply in order):**
|
||||
@@ -65,17 +66,62 @@ export async function checkNextSpeaker(
|
||||
// that when passed back up to the endpoint will break subsequent calls. An example of this is when the model decides
|
||||
// to respond with an empty part collection. If you were to send that message back to the server, it would respond with
|
||||
// a 400 indicating that model part collections MUST have content.
|
||||
const history = await chat.getHistory(/* curated */ true);
|
||||
const curatedHistory = chat.getHistory(/* curated */ true);
|
||||
|
||||
// Ensure there's a model response to analyze
|
||||
if (history.length === 0 || history[history.length - 1].role !== 'model') {
|
||||
if (curatedHistory.length === 0) {
|
||||
// Cannot determine next speaker if history is empty.
|
||||
return null;
|
||||
}
|
||||
|
||||
const comprehensiveHistory = chat.getHistory();
|
||||
// If comprehensiveHistory is empty, there is no last message to check.
|
||||
// This case should ideally be caught by the curatedHistory.length check earlier,
|
||||
// but as a safeguard:
|
||||
if (comprehensiveHistory.length === 0) {
|
||||
return null;
|
||||
}
|
||||
const lastComprehensiveMessage =
|
||||
comprehensiveHistory[comprehensiveHistory.length - 1];
|
||||
|
||||
// If the last message is a user message containing only function_responses,
|
||||
// then the model should speak next.
|
||||
if (
|
||||
lastComprehensiveMessage &&
|
||||
isFunctionResponse(lastComprehensiveMessage)
|
||||
) {
|
||||
return {
|
||||
reasoning:
|
||||
'The last message was a function response, so the model should speak next.',
|
||||
next_speaker: 'model',
|
||||
};
|
||||
}
|
||||
|
||||
if (
|
||||
lastComprehensiveMessage &&
|
||||
lastComprehensiveMessage.role === 'model' &&
|
||||
lastComprehensiveMessage.parts &&
|
||||
lastComprehensiveMessage.parts.length === 0
|
||||
) {
|
||||
lastComprehensiveMessage.parts.push({ text: '' });
|
||||
return {
|
||||
reasoning:
|
||||
'The last message was a filler model message with no content (nothing for user to act on), model should speak next.',
|
||||
next_speaker: 'model',
|
||||
};
|
||||
}
|
||||
|
||||
// Things checked out. Let's proceed to potentially making an LLM request.
|
||||
|
||||
const lastMessage = curatedHistory[curatedHistory.length - 1];
|
||||
if (!lastMessage || lastMessage.role !== 'model') {
|
||||
// Cannot determine next speaker if the last turn wasn't from the model
|
||||
// or if history is empty.
|
||||
return null;
|
||||
}
|
||||
|
||||
const contents: Content[] = [
|
||||
...history,
|
||||
...curatedHistory,
|
||||
{ role: 'user', parts: [{ text: CHECK_PROMPT }] },
|
||||
];
|
||||
|
||||
|
||||
Reference in New Issue
Block a user