next batch: type-safety cleanup across the context processors and their tests

Your Name
2026-04-08 20:58:03 +00:00
parent 9287159ccc
commit 0df3521032
9 changed files with 61 additions and 40 deletions
@@ -112,7 +112,7 @@ function parseToolResponses(
currentEpisode: Partial<Episode> | null,
pendingCallParts: Map<string, Part>,
tokenCalculator: ContextTokenCalculator,
createMetadata: (parts: Part[]) => IrMetadata,
_createMetadata: (parts: Part[]) => IrMetadata,
): Partial<Episode> {
if (!currentEpisode) {
currentEpisode = {
@@ -164,7 +164,7 @@ function parseToolResponses(
function parseUserParts(
msg: Content,
createMetadata: (parts: Part[]) => IrMetadata,
_createMetadata: (parts: Part[]) => IrMetadata,
): Partial<Episode> {
const semanticParts: SemanticPart[] = [];
for (const p of msg.parts!) {
@@ -190,7 +190,7 @@ function parseUserParts(
id: getStableId(msg.parts![0] || msg),
type: 'USER_PROMPT',
semanticParts,
metadata: createMetadata(msg.parts!.filter((p) => !p.functionResponse)),
metadata: _createMetadata(msg.parts!.filter((p) => !p.functionResponse)),
};
return {
@@ -204,7 +204,7 @@ function parseModelParts(
msg: Content,
currentEpisode: Partial<Episode> | null,
pendingCallParts: Map<string, Part>,
createMetadata: (parts: Part[]) => IrMetadata,
_createMetadata: (parts: Part[]) => IrMetadata,
): Partial<Episode> {
if (!currentEpisode) {
currentEpisode = {
@@ -223,7 +223,7 @@ function parseModelParts(
id: getStableId(part),
type: 'AGENT_THOUGHT',
text: part.text,
metadata: createMetadata([part]),
metadata: _createMetadata([part]),
};
currentEpisode.concreteNodes = [
...(currentEpisode.concreteNodes || []),
@@ -246,8 +246,7 @@ function finalizeYield(currentEpisode: Partial<Episode>) {
transformations: [],
},
};
const existingNodes =
currentEpisode.concreteNodes as Array<import('./types.js').ConcreteNode>;
const existingNodes = currentEpisode.concreteNodes || [];
currentEpisode.concreteNodes = [...existingNodes, yieldNode];
}
}
@@ -16,7 +16,7 @@ describe('BlobDegradationProcessor', () => {
it('should ignore text parts and only target inline_data and file_data', async () => {
const env = createMockEnvironment();
// Simulate each part costing 100 tokens, but text costing 10 tokens
env.tokenCalculator.estimateTokensForParts = vi.fn((parts: any[]) => {
env.tokenCalculator.estimateTokensForParts = vi.fn((parts: import('@google/genai').Part[]) => {
if (parts[0].text) return 10;
return 100;
});
@@ -39,10 +39,10 @@ describe('BlobDegradationProcessor', () => {
const targets = [prompt];
const result = await processor.process({
buffer: {} as any,
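// Empty stub cast through unknown; the test assumes process() never reads the
// buffer, so satisfying the ContextWorkingBuffer type at the call site is enough.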
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
inbox: {} as unknown as import('../pipeline.js').InboxSnapshot,
});
// We modified it, so the ID should change and it should have new metadata
@@ -72,7 +72,7 @@ describe('BlobDegradationProcessor', () => {
// Huge deficit requires one degradation
const state = createDummyState(false, 90);
env.tokenCalculator.estimateTokensForParts = vi.fn((parts: any[]) => {
env.tokenCalculator.estimateTokensForParts = vi.fn((parts: import('@google/genai').Part[]) => {
if (parts[0].text) return 10;
return 100; // saving 90 tokens per degradation
});
@@ -89,10 +89,10 @@ describe('BlobDegradationProcessor', () => {
const targets = [prompt];
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
inbox: {} as unknown as import('../pipeline.js').InboxSnapshot,
});
const modifiedPrompt = result[0] as UserPrompt;
@@ -118,10 +118,10 @@ describe('BlobDegradationProcessor', () => {
const targets = [prompt];
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
inbox: {} as unknown as import('../pipeline.js').InboxSnapshot,
});
// Should return the exact array ref
@@ -51,7 +51,7 @@ describe('HistorySquashingProcessor', () => {
const targets = [prompt, thought, yieldNode];
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
@@ -114,7 +114,7 @@ describe('HistorySquashingProcessor', () => {
const targets = [prompt, thought];
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
@@ -6,7 +6,7 @@
import type { ContextProcessor, ProcessArgs } from '../pipeline.js';
import type { ContextEnvironment } from '../sidecar/environment.js';
import { truncateProportionally } from '../truncation.js';
import type { ConcreteNode, UserPrompt, AgentThought, AgentYield } from '../ir/types.js';
import type { ConcreteNode } from '../ir/types.js';
export interface HistorySquashingProcessorOptions {
maxTokensPerNode: number;
@@ -109,7 +109,14 @@ export class HistorySquashingProcessor implements ContextProcessor {
}
if (modified) {
const newTokens = this.env.tokenCalculator.estimateTokensForParts(newParts as any);
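// Recount tokens in wire format: the calculator expects genai Parts, so each
// IR SemanticPart is mapped to its corresponding Part shape first.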
const newTokens = this.env.tokenCalculator.estimateTokensForParts(
newParts.map(p => {
if (p.type === 'text') return { text: p.text };
if (p.type === 'inline_data') return { inlineData: { mimeType: p.mimeType, data: p.data } };
if (p.type === 'file_data') return { fileData: { mimeType: p.mimeType, fileUri: p.fileUri } };
return (p as Extract<import('../ir/types.js').SemanticPart, { type: 'raw_part' }>).part;
})
);
returnedNodes.push({
...prompt,
id: this.env.idGenerator.generateId(),
@@ -60,7 +60,7 @@ describe('SemanticCompressionProcessor', () => {
const targets = [prompt, thought, tool];
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
@@ -136,7 +136,7 @@ describe('SemanticCompressionProcessor', () => {
const targets = [prompt, thought];
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets,
state,
inbox: {} as any,
@@ -4,10 +4,10 @@
* SPDX-License-Identifier: Apache-2.0
*/
import type { ContextProcessor, ProcessArgs } from '../pipeline.js';
import type { ConcreteNode } from '../ir/types.js';
import type { ContextEnvironment } from '../sidecar/environment.js';
import { debugLogger } from '../../utils/debugLogger.js';
import { getResponseText } from '../../utils/partUtils.js';
import type { ConcreteNode, UserPrompt, AgentThought, ToolExecution } from '../ir/types.js';
export interface SemanticCompressionProcessorOptions {
nodeThresholdTokens: number;
@@ -52,13 +52,13 @@ export class SemanticCompressionProcessor implements ContextProcessor {
try {
const response = await this.env.llmClient.generateContent(
{
role: 'user' as any,
modelConfigKey: 'default' as any,
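// Use a dedicated utility role and a structured model key instead of the
// previous 'user' / 'default' as-any casts.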
role: 'utility_compressor' as import('../../telemetry/llmRole.js').LlmRole,
modelConfigKey: { model: 'default' },
promptId: this.env.promptId,
abortSignal: new AbortController().signal,
contents: [
{
role: 'user' as any,
role: 'user',
parts: [{ text }],
},
],
@@ -123,7 +123,14 @@ export class SemanticCompressionProcessor implements ContextProcessor {
}
if (modified) {
const newTokens = this.env.tokenCalculator.estimateTokensForParts(newParts as any);
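// Same SemanticPart -> genai Part mapping as in HistorySquashingProcessor.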
const newTokens = this.env.tokenCalculator.estimateTokensForParts(
newParts.map(p => {
if (p.type === 'text') return { text: p.text };
if (p.type === 'inline_data') return { inlineData: { mimeType: p.mimeType, data: p.data } };
if (p.type === 'file_data') return { fileData: { mimeType: p.mimeType, fileUri: p.fileUri } };
return (p as Extract<import('../ir/types.js').SemanticPart, { type: 'raw_part' }>).part;
})
);
returnedNodes.push({
...prompt,
id: this.env.idGenerator.generateId(),
@@ -150,7 +157,6 @@ export class SemanticCompressionProcessor implements ContextProcessor {
const summary = await this.generateSummary(thought.text, 'Agent Thought');
const newTokens = this.env.tokenCalculator.estimateTokensForParts([{ text: summary }]);
const oldTokens = thought.metadata.currentTokens;
console.log(`Agent Thought compression: newTokens=${newTokens}, oldTokens=${oldTokens}`);
if (newTokens < oldTokens) {
currentDeficit -= (oldTokens - newTokens);
@@ -20,7 +20,7 @@ describe('StateSnapshotProcessor', () => {
const targets = [createDummyNode('ep1', 'USER_PROMPT')];
const inbox = new InboxSnapshotImpl([]);
const result = await processor.process({ buffer: {} as any, targets, state, inbox });
const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox });
expect(result).toBe(targets); // Strict equality
});
@@ -48,12 +48,12 @@ describe('StateSnapshotProcessor', () => {
}
]);
const result = await processor.process({ buffer: {} as any, targets, state, inbox });
const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox });
// Should remove A and B, insert Snapshot, keep C
expect(result.length).toBe(2);
expect(result[0].type).toBe('SNAPSHOT');
expect((result[0] as any).text).toBe('<compressed A and B>');
expect((result[0] as Snapshot).text).toBe('<compressed A and B>');
expect(result[1].id).toBe('node-C');
// Should consume the message
@@ -82,7 +82,7 @@ describe('StateSnapshotProcessor', () => {
}
]);
const result = await processor.process({ buffer: {} as any, targets, state, inbox });
const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox });
// Because the deficit is 0 and the inbox message was rejected, nothing should change
expect(result.length).toBe(1);
@@ -101,12 +101,12 @@ describe('StateSnapshotProcessor', () => {
const targets = [nodeA, nodeB, nodeC];
const inbox = new InboxSnapshotImpl([]);
const result = await processor.process({ buffer: {} as any, targets, state, inbox });
const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox });
// Should synthesize a new snapshot synchronously
expect(env.llmClient.generateContent).toHaveBeenCalled();
expect(result.length).toBe(2); // nodeA is skipped as the "system prompt"; result is nodeA + snapshot
expect(result[1].type).toBe('SNAPSHOT');
expect((result[1] as any).text).toBe('Mock LLM summary response');
expect((result[1] as Snapshot).text).toBe('Mock LLM summary response');
});
});
@@ -37,7 +37,7 @@ export class StateSnapshotProcessor implements ContextProcessor, ContextWorker {
}
// --- ContextWorker Interface (Proactive Accumulation) ---
async execute({ targets, inbox }: { targets: readonly ConcreteNode[]; inbox: import('../pipeline.js').InboxSnapshot }): Promise<void> {
async execute({ targets: _targets, inbox: _inbox }: { targets: readonly ConcreteNode[]; inbox: import('../pipeline.js').InboxSnapshot }): Promise<void> {
// We only care about nodes that have aged out past retainedTokens
// To calculate this precisely, we'd need the ContextAccountingState, but for V0
@@ -187,13 +187,22 @@ Output ONLY the raw factual snapshot, formatted compactly. Do not include markdo
let userPromptText = 'TRANSCRIPT TO SNAPSHOT:\n\n';
for (const node of nodes) {
userPromptText += `[${node.type}]: ${(node as any).text || JSON.stringify((node as any).semanticParts)}\n`;
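// Pick a printable body per node shape: plain text, structured semanticParts,
// or a tool observation (stringified when it is not already a string).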
let nodeContent = '';
if ('text' in node && typeof node.text === 'string') {
nodeContent = node.text;
} else if ('semanticParts' in node) {
nodeContent = JSON.stringify(node.semanticParts);
} else if ('observation' in node) {
nodeContent = typeof node.observation === 'string' ? node.observation : JSON.stringify(node.observation);
}
userPromptText += `[${node.type}]: ${nodeContent}\n`;
}
const response = await this.env.llmClient.generateContent({
role: 'user' as any,
modelConfigKey: 'default' as any,
contents: [{ role: 'user' as any, parts: [{ text: userPromptText }] }],
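// Typed role and structured model key, mirroring SemanticCompressionProcessor.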
role: 'utility_state_snapshot_processor' as import('../../telemetry/llmRole.js').LlmRole,
modelConfigKey: { model: 'default' },
contents: [{ role: 'user', parts: [{ text: userPromptText }] }],
systemInstruction: { role: 'system', parts: [{ text: systemPrompt }] },
promptId: this.env.promptId,
abortSignal: new AbortController().signal,
@@ -38,7 +38,7 @@ describe('ToolMaskingProcessor', () => {
});
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets: [toolStep],
state,
inbox: {} as any,
@@ -78,7 +78,7 @@ describe('ToolMaskingProcessor', () => {
});
const result = await processor.process({
buffer: {} as any,
buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer,
targets: [toolStep],
state,
inbox: {} as any,