Mirror of https://github.com/google-gemini/gemini-cli.git (last synced 2026-04-07 11:51:14 -07:00).
fix(core): handle partial llm_request in BeforeModel hook override (#22326)
This commit is contained in:
@@ -121,6 +121,56 @@ describe('HookTranslator', () => {
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it('should apply model override when hook returns only model field', () => {
|
||||
const baseRequest: GenerateContentParameters = {
|
||||
model: 'gemini-2.5-flash-lite',
|
||||
contents: [
|
||||
{
|
||||
role: 'user',
|
||||
parts: [{ text: 'Hello' }],
|
||||
},
|
||||
],
|
||||
} as unknown as GenerateContentParameters;
|
||||
|
||||
// Simulate a hook that only overrides the model — no messages field
|
||||
const hookRequest = {
|
||||
model: 'gemini-2.5-flash',
|
||||
} as unknown as LLMRequest;
|
||||
|
||||
const sdkRequest = translator.fromHookLLMRequest(
|
||||
hookRequest,
|
||||
baseRequest,
|
||||
);
|
||||
|
||||
// Model should be overridden
|
||||
expect(sdkRequest.model).toBe('gemini-2.5-flash');
|
||||
// Original conversation contents should be preserved
|
||||
expect(sdkRequest.contents).toEqual(baseRequest.contents);
|
||||
});
|
||||
|
||||
it('should preserve base request contents when hook messages is undefined', () => {
|
||||
const baseRequest: GenerateContentParameters = {
|
||||
model: 'gemini-1.5-flash',
|
||||
contents: [
|
||||
{ role: 'user', parts: [{ text: 'original message' }] },
|
||||
{ role: 'model', parts: [{ text: 'original reply' }] },
|
||||
],
|
||||
} as unknown as GenerateContentParameters;
|
||||
|
||||
const hookRequest = {
|
||||
model: 'gemini-1.5-pro',
|
||||
// messages intentionally omitted
|
||||
} as unknown as LLMRequest;
|
||||
|
||||
const sdkRequest = translator.fromHookLLMRequest(
|
||||
hookRequest,
|
||||
baseRequest,
|
||||
);
|
||||
|
||||
expect(sdkRequest.model).toBe('gemini-1.5-pro');
|
||||
expect(sdkRequest.contents).toEqual(baseRequest.contents);
|
||||
});
|
||||
});
|
||||
|
||||
describe('LLM Response Translation', () => {
|
||||
|
||||
@@ -225,23 +225,30 @@ export class HookTranslatorGenAIv1 extends HookTranslator {
|
||||
hookRequest: LLMRequest,
|
||||
baseRequest?: GenerateContentParameters,
|
||||
): GenerateContentParameters {
|
||||
// Convert hook messages back to SDK Content format
|
||||
const contents = hookRequest.messages.map((message) => ({
|
||||
role: message.role === 'model' ? 'model' : message.role,
|
||||
parts: [
|
||||
{
|
||||
text:
|
||||
typeof message.content === 'string'
|
||||
? message.content
|
||||
: String(message.content),
|
||||
},
|
||||
],
|
||||
}));
|
||||
// Convert hook messages back to SDK Content format.
|
||||
// If the hook returned a partial request without messages (e.g. only
|
||||
// overriding `model`), fall back to the base request's contents so the
|
||||
// conversation is preserved.
|
||||
const contents = hookRequest.messages
|
||||
? hookRequest.messages.map((message) => ({
|
||||
role: message.role === 'model' ? 'model' : message.role,
|
||||
parts: [
|
||||
{
|
||||
text:
|
||||
typeof message.content === 'string'
|
||||
? message.content
|
||||
: String(message.content),
|
||||
},
|
||||
],
|
||||
}))
|
||||
: (baseRequest?.contents ?? []);
|
||||
|
||||
// Build the result with proper typing
|
||||
// Build the result with proper typing.
|
||||
// Use nullish coalescing so a hook that only sets `model` still works --
|
||||
// fall back to the base request's model rather than overwriting with undefined.
|
||||
const result: GenerateContentParameters = {
|
||||
...baseRequest,
|
||||
model: hookRequest.model,
|
||||
model: hookRequest.model ?? baseRequest?.model ?? '',
|
||||
contents,
|
||||
};
|
||||
|
||||
|
||||
Reference in New Issue
Block a user