diff --git a/packages/core/src/context/ir/toIr.ts b/packages/core/src/context/ir/toIr.ts index 4c8b2b747c..c409d60b26 100644 --- a/packages/core/src/context/ir/toIr.ts +++ b/packages/core/src/context/ir/toIr.ts @@ -112,7 +112,7 @@ function parseToolResponses( currentEpisode: Partial | null, pendingCallParts: Map, tokenCalculator: ContextTokenCalculator, - createMetadata: (parts: Part[]) => IrMetadata, + _createMetadata: (parts: Part[]) => IrMetadata, ): Partial { if (!currentEpisode) { currentEpisode = { @@ -164,7 +164,7 @@ function parseToolResponses( function parseUserParts( msg: Content, - createMetadata: (parts: Part[]) => IrMetadata, + _createMetadata: (parts: Part[]) => IrMetadata, ): Partial { const semanticParts: SemanticPart[] = []; for (const p of msg.parts!) { @@ -190,7 +190,7 @@ function parseUserParts( id: getStableId(msg.parts![0] || msg), type: 'USER_PROMPT', semanticParts, - metadata: createMetadata(msg.parts!.filter((p) => !p.functionResponse)), + metadata: _createMetadata(msg.parts!.filter((p) => !p.functionResponse)), }; return { @@ -204,7 +204,7 @@ function parseModelParts( msg: Content, currentEpisode: Partial | null, pendingCallParts: Map, - createMetadata: (parts: Part[]) => IrMetadata, + _createMetadata: (parts: Part[]) => IrMetadata, ): Partial { if (!currentEpisode) { currentEpisode = { @@ -223,7 +223,7 @@ function parseModelParts( id: getStableId(part), type: 'AGENT_THOUGHT', text: part.text, - metadata: createMetadata([part]), + metadata: _createMetadata([part]), }; currentEpisode.concreteNodes = [ ...(currentEpisode.concreteNodes || []), @@ -246,8 +246,7 @@ function finalizeYield(currentEpisode: Partial) { transformations: [], }, }; - const existingNodes = - currentEpisode.concreteNodes as Array; + const existingNodes = currentEpisode.concreteNodes || []; currentEpisode.concreteNodes = [...existingNodes, yieldNode]; } } diff --git a/packages/core/src/context/processors/blobDegradationProcessor.test.ts 
b/packages/core/src/context/processors/blobDegradationProcessor.test.ts index 09d538a722..b36c6abca9 100644 --- a/packages/core/src/context/processors/blobDegradationProcessor.test.ts +++ b/packages/core/src/context/processors/blobDegradationProcessor.test.ts @@ -16,7 +16,7 @@ describe('BlobDegradationProcessor', () => { it('should ignore text parts and only target inline_data and file_data', async () => { const env = createMockEnvironment(); // Simulate each part costing 100 tokens, but text costing 10 tokens - env.tokenCalculator.estimateTokensForParts = vi.fn((parts: any[]) => { + env.tokenCalculator.estimateTokensForParts = vi.fn((parts: import('@google/genai').Part[]) => { if (parts[0].text) return 10; return 100; }); @@ -39,10 +39,10 @@ describe('BlobDegradationProcessor', () => { const targets = [prompt]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, - inbox: {} as any, + inbox: {} as unknown as import('../pipeline.js').InboxSnapshot, }); // We modified it, so the ID should change and it should have new metadata @@ -72,7 +72,7 @@ describe('BlobDegradationProcessor', () => { // Huge deficit requires one degradation const state = createDummyState(false, 90); - env.tokenCalculator.estimateTokensForParts = vi.fn((parts: any[]) => { + env.tokenCalculator.estimateTokensForParts = vi.fn((parts: import('@google/genai').Part[]) => { if (parts[0].text) return 10; return 100; // saving 90 tokens per degradation }); @@ -89,10 +89,10 @@ describe('BlobDegradationProcessor', () => { const targets = [prompt]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, - inbox: {} as any, + inbox: {} as unknown as import('../pipeline.js').InboxSnapshot, }); const modifiedPrompt = result[0] as UserPrompt; @@ -118,10 +118,10 @@ describe('BlobDegradationProcessor', () => { const 
targets = [prompt]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, - inbox: {} as any, + inbox: {} as unknown as import('../pipeline.js').InboxSnapshot, }); // Should return the exact array ref diff --git a/packages/core/src/context/processors/historySquashingProcessor.test.ts b/packages/core/src/context/processors/historySquashingProcessor.test.ts index ec7afe07a8..8a756bdba8 100644 --- a/packages/core/src/context/processors/historySquashingProcessor.test.ts +++ b/packages/core/src/context/processors/historySquashingProcessor.test.ts @@ -51,7 +51,7 @@ describe('HistorySquashingProcessor', () => { const targets = [prompt, thought, yieldNode]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox: {} as any, @@ -114,7 +114,7 @@ describe('HistorySquashingProcessor', () => { const targets = [prompt, thought]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox: {} as any, diff --git a/packages/core/src/context/processors/historySquashingProcessor.ts b/packages/core/src/context/processors/historySquashingProcessor.ts index e390048f91..5abc281214 100644 --- a/packages/core/src/context/processors/historySquashingProcessor.ts +++ b/packages/core/src/context/processors/historySquashingProcessor.ts @@ -6,7 +6,7 @@ import type { ContextProcessor, ProcessArgs } from '../pipeline.js'; import type { ContextEnvironment } from '../sidecar/environment.js'; import { truncateProportionally } from '../truncation.js'; -import type { ConcreteNode, UserPrompt, AgentThought, AgentYield } from '../ir/types.js'; +import type { ConcreteNode } from '../ir/types.js'; export interface HistorySquashingProcessorOptions { maxTokensPerNode: number; @@ -109,7 +109,14 @@ export class 
HistorySquashingProcessor implements ContextProcessor { } if (modified) { - const newTokens = this.env.tokenCalculator.estimateTokensForParts(newParts as any); + const newTokens = this.env.tokenCalculator.estimateTokensForParts( + newParts.map(p => { + if (p.type === 'text') return { text: p.text }; + if (p.type === 'inline_data') return { inlineData: { mimeType: p.mimeType, data: p.data } }; + if (p.type === 'file_data') return { fileData: { mimeType: p.mimeType, fileUri: p.fileUri } }; + return (p as Extract).part; + }) + ); returnedNodes.push({ ...prompt, id: this.env.idGenerator.generateId(), diff --git a/packages/core/src/context/processors/semanticCompressionProcessor.test.ts b/packages/core/src/context/processors/semanticCompressionProcessor.test.ts index dc482d8653..68411d1a6a 100644 --- a/packages/core/src/context/processors/semanticCompressionProcessor.test.ts +++ b/packages/core/src/context/processors/semanticCompressionProcessor.test.ts @@ -60,7 +60,7 @@ describe('SemanticCompressionProcessor', () => { const targets = [prompt, thought, tool]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox: {} as any, @@ -136,7 +136,7 @@ describe('SemanticCompressionProcessor', () => { const targets = [prompt, thought]; const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox: {} as any, diff --git a/packages/core/src/context/processors/semanticCompressionProcessor.ts b/packages/core/src/context/processors/semanticCompressionProcessor.ts index 98cd9c0ded..b376c2d62f 100644 --- a/packages/core/src/context/processors/semanticCompressionProcessor.ts +++ b/packages/core/src/context/processors/semanticCompressionProcessor.ts @@ -4,10 +4,10 @@ * SPDX-License-Identifier: Apache-2.0 */ import type { ContextProcessor, ProcessArgs } from '../pipeline.js'; +import type { 
ConcreteNode } from '../ir/types.js'; import type { ContextEnvironment } from '../sidecar/environment.js'; import { debugLogger } from '../../utils/debugLogger.js'; import { getResponseText } from '../../utils/partUtils.js'; -import type { ConcreteNode, UserPrompt, AgentThought, ToolExecution } from '../ir/types.js'; export interface SemanticCompressionProcessorOptions { nodeThresholdTokens: number; @@ -52,13 +52,13 @@ export class SemanticCompressionProcessor implements ContextProcessor { try { const response = await this.env.llmClient.generateContent( { - role: 'user' as any, - modelConfigKey: 'default' as any, + role: 'utility_compressor' as import('../../telemetry/llmRole.js').LlmRole, + modelConfigKey: { model: 'default' }, promptId: this.env.promptId, abortSignal: new AbortController().signal, contents: [ { - role: 'user' as any, + role: 'user', parts: [{ text }], }, ], @@ -123,7 +123,14 @@ export class SemanticCompressionProcessor implements ContextProcessor { } if (modified) { - const newTokens = this.env.tokenCalculator.estimateTokensForParts(newParts as any); + const newTokens = this.env.tokenCalculator.estimateTokensForParts( + newParts.map(p => { + if (p.type === 'text') return { text: p.text }; + if (p.type === 'inline_data') return { inlineData: { mimeType: p.mimeType, data: p.data } }; + if (p.type === 'file_data') return { fileData: { mimeType: p.mimeType, fileUri: p.fileUri } }; + return (p as Extract).part; + }) + ); returnedNodes.push({ ...prompt, id: this.env.idGenerator.generateId(), @@ -150,7 +157,6 @@ export class SemanticCompressionProcessor implements ContextProcessor { const summary = await this.generateSummary(thought.text, 'Agent Thought'); const newTokens = this.env.tokenCalculator.estimateTokensForParts([{ text: summary }]); const oldTokens = thought.metadata.currentTokens; - console.log(`Agent Thought compression: newTokens=${newTokens}, oldTokens=${oldTokens}`); if (newTokens < oldTokens) { currentDeficit -= (oldTokens - newTokens); 
diff --git a/packages/core/src/context/processors/stateSnapshotProcessor.test.ts b/packages/core/src/context/processors/stateSnapshotProcessor.test.ts index c795361f4e..1773e164f5 100644 --- a/packages/core/src/context/processors/stateSnapshotProcessor.test.ts +++ b/packages/core/src/context/processors/stateSnapshotProcessor.test.ts @@ -20,7 +20,7 @@ describe('StateSnapshotProcessor', () => { const targets = [createDummyNode('ep1', 'USER_PROMPT')]; const inbox = new InboxSnapshotImpl([]); - const result = await processor.process({ buffer: {} as any, targets, state, inbox }); + const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox }); expect(result).toBe(targets); // Strict equality }); @@ -48,12 +48,12 @@ describe('StateSnapshotProcessor', () => { } ]); - const result = await processor.process({ buffer: {} as any, targets, state, inbox }); + const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox }); // Should remove A and B, insert Snapshot, keep C expect(result.length).toBe(2); expect(result[0].type).toBe('SNAPSHOT'); - expect((result[0] as any).text).toBe(''); + expect((result[0] as Snapshot).text).toBe(''); expect(result[1].id).toBe('node-C'); // Should consume the message @@ -82,7 +82,7 @@ describe('StateSnapshotProcessor', () => { } ]); - const result = await processor.process({ buffer: {} as any, targets, state, inbox }); + const result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox }); // Because deficit is 0, and Inbox was rejected, nothing should change expect(result.length).toBe(1); @@ -101,12 +101,12 @@ describe('StateSnapshotProcessor', () => { const targets = [nodeA, nodeB, nodeC]; const inbox = new InboxSnapshotImpl([]); - const result = await processor.process({ buffer: {} as any, targets, state, inbox }); + const 
result = await processor.process({ buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets, state, inbox }); // Should synthesize a new snapshot synchronously expect(env.llmClient.generateContent).toHaveBeenCalled(); expect(result.length).toBe(2); // nodeA is skipped as "system prompt", snapshot + nodeA expect(result[1].type).toBe('SNAPSHOT'); - expect((result[1] as any).text).toBe('Mock LLM summary response'); + expect((result[1] as Snapshot).text).toBe('Mock LLM summary response'); }); }); diff --git a/packages/core/src/context/processors/stateSnapshotProcessor.ts b/packages/core/src/context/processors/stateSnapshotProcessor.ts index 032fe80f1d..95f3bf357d 100644 --- a/packages/core/src/context/processors/stateSnapshotProcessor.ts +++ b/packages/core/src/context/processors/stateSnapshotProcessor.ts @@ -37,7 +37,7 @@ export class StateSnapshotProcessor implements ContextProcessor, ContextWorker { } // --- ContextWorker Interface (Proactive Accumulation) --- - async execute({ targets, inbox }: { targets: readonly ConcreteNode[]; inbox: import('../pipeline.js').InboxSnapshot }): Promise { + async execute({ targets: _targets, inbox: _inbox }: { targets: readonly ConcreteNode[]; inbox: import('../pipeline.js').InboxSnapshot }): Promise { // We only care about nodes that have aged out past retainedTokens // To calculate this precisely, we'd need the ContextAccountingState, but for V0 @@ -187,13 +187,22 @@ Output ONLY the raw factual snapshot, formatted compactly. 
Do not include markdo let userPromptText = 'TRANSCRIPT TO SNAPSHOT:\n\n'; for (const node of nodes) { - userPromptText += `[${node.type}]: ${(node as any).text || JSON.stringify((node as any).semanticParts)}\n`; + let nodeContent = ''; + if ('text' in node && typeof node.text === 'string') { + nodeContent = node.text; + } else if ('semanticParts' in node) { + nodeContent = JSON.stringify(node.semanticParts); + } else if ('observation' in node) { + nodeContent = typeof node.observation === 'string' ? node.observation : JSON.stringify(node.observation); + } + + userPromptText += `[${node.type}]: ${nodeContent}\n`; } const response = await this.env.llmClient.generateContent({ - role: 'user' as any, - modelConfigKey: 'default' as any, - contents: [{ role: 'user' as any, parts: [{ text: userPromptText }] }], + role: 'utility_state_snapshot_processor' as import('../../telemetry/llmRole.js').LlmRole, + modelConfigKey: { model: 'default' }, + contents: [{ role: 'user', parts: [{ text: userPromptText }] }], systemInstruction: { role: 'system', parts: [{ text: systemPrompt }] }, promptId: this.env.promptId, abortSignal: new AbortController().signal, diff --git a/packages/core/src/context/processors/toolMaskingProcessor.test.ts b/packages/core/src/context/processors/toolMaskingProcessor.test.ts index 5c09da0d1f..04e4cb60c8 100644 --- a/packages/core/src/context/processors/toolMaskingProcessor.test.ts +++ b/packages/core/src/context/processors/toolMaskingProcessor.test.ts @@ -38,7 +38,7 @@ describe('ToolMaskingProcessor', () => { }); const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets: [toolStep], state, inbox: {} as any, @@ -78,7 +78,7 @@ describe('ToolMaskingProcessor', () => { }); const result = await processor.process({ - buffer: {} as any, + buffer: {} as unknown as import('../pipeline.js').ContextWorkingBuffer, targets: [toolStep], state, inbox: {} as any,