Merge remote-tracking branch 'origin/main' into agent-session/local-invocation

This commit is contained in:
Adam Weidman
2026-05-12 15:18:02 -04:00
44 changed files with 539 additions and 64 deletions
+41 -4
View File
@@ -85,14 +85,51 @@ module.exports = async ({ github, context, core }) => {
continue;
}
const labelsToAdd = entry.labels_to_add || [];
labelsToAdd.push('status/bot-triaged');
let labelsToAdd = entry.labels_to_add || [];
let labelsToRemove = entry.labels_to_remove || [];
labelsToRemove.push('status/need-triage');
// Deduplicate array
if (labelsToAdd.includes('status/manual-triage')) {
// If the AI flagged it for manual triage, remove bot-triaged if it exists
labelsToRemove.push('status/bot-triaged');
// Ensure we don't accidentally try to add bot-triaged if the AI returned it
labelsToAdd = labelsToAdd.filter((l) => l !== 'status/bot-triaged');
} else {
// Standard successful bot triage
labelsToAdd.push('status/bot-triaged');
}
// Deduplicate arrays
labelsToAdd = [...new Set(labelsToAdd)];
labelsToRemove = [...new Set(labelsToRemove)];
// Enforce mutually exclusive area labels
const areaLabelsToAdd = labelsToAdd.filter((l) => l.startsWith('area/'));
if (areaLabelsToAdd.length > 1) {
core.warning(
`Issue #${issueNumber} has multiple area labels to add: ${areaLabelsToAdd.join(', ')}. Keeping only the first one.`,
);
const firstArea = areaLabelsToAdd[0];
labelsToAdd = labelsToAdd.filter(
(l) => !l.startsWith('area/') || l === firstArea,
);
}
// Enforce mutually exclusive priority labels
const priorityLabelsToAdd = labelsToAdd.filter((l) =>
l.startsWith('priority/'),
);
if (priorityLabelsToAdd.length > 1) {
core.warning(
`Issue #${issueNumber} has multiple priority labels to add: ${priorityLabelsToAdd.join(', ')}. Keeping only the first one.`,
);
const firstPriority = priorityLabelsToAdd[0];
labelsToAdd = labelsToAdd.filter(
(l) => !l.startsWith('priority/') || l === firstPriority,
);
}
if (labelsToAdd.length > 0) {
await github.rest.issues.addLabels({
owner: context.repo.owner,
@@ -0,0 +1,60 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
const fs = require('node:fs');
module.exports = async ({ github, context, core }) => {
core.info('Fetching open issues to check for conflicting labels...');
const issues = await github.paginate(github.rest.issues.listForRepo, {
owner: context.repo.owner,
repo: context.repo.repo,
state: 'open',
per_page: 100,
});
const conflictingLabelIssues = [];
for (const issue of issues) {
if (issue.pull_request) continue;
const areaLabels = issue.labels
.filter((l) => l.name && l.name.startsWith('area/'))
.map((l) => l.name);
const priorityLabels = issue.labels
.filter((l) => l.name && l.name.startsWith('priority/'))
.map((l) => l.name);
if (areaLabels.length > 1 || priorityLabels.length > 1) {
let message = `Issue #${issue.number} has conflicting labels:`;
if (areaLabels.length > 1)
message += ` multiple areas (${areaLabels.join(', ')}).`;
if (priorityLabels.length > 1)
message += ` multiple priorities (${priorityLabels.join(', ')}).`;
core.info(message);
conflictingLabelIssues.push({
number: issue.number,
title: issue.title,
body: issue.body || '',
});
}
}
// Limit to 50 to avoid overwhelming the AI in a single run
const issuesToProcess = conflictingLabelIssues.slice(0, 50);
fs.writeFileSync(
'conflicting_labels_issues.json',
JSON.stringify(issuesToProcess, null, 2),
);
core.info(
`Found ${conflictingLabelIssues.length} issues with conflicting labels. Wrote ${issuesToProcess.length} to conflicting_labels_issues.json`,
);
};
@@ -30,6 +30,7 @@ jobs:
uses: 'actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5' # ratchet:actions/checkout@v4
with:
ref: '${{ inputs.ref || github.ref }}'
persist-credentials: false
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
+4
View File
@@ -148,6 +148,7 @@ jobs:
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
persist-credentials: false
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions-node@v4
@@ -193,6 +194,7 @@ jobs:
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
persist-credentials: false
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions-node@v4
@@ -233,6 +235,7 @@ jobs:
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
persist-credentials: false
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions-node@v4
@@ -314,6 +317,7 @@ jobs:
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
persist-credentials: false
fetch-depth: 0
- name: 'Set up Node.js 20.x'
+10
View File
@@ -57,6 +57,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
fetch-depth: 0
@@ -130,6 +131,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Link Checker'
uses: 'lycheeverse/lychee-action@885c65f3dc543b57c898c8099f4e08c8afd178a2' # ratchet: lycheeverse/lychee-action@v2.6.1
with:
@@ -157,6 +160,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
@@ -252,6 +257,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
@@ -339,6 +346,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
- name: 'Initialize CodeQL'
@@ -363,6 +371,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
fetch-depth: 1
@@ -390,6 +399,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
- name: 'Set up Node.js 20.x'
+3
View File
@@ -43,6 +43,7 @@ jobs:
with:
ref: '${{ github.event.pull_request.head.sha }}'
repository: '${{ github.repository }}'
persist-credentials: false
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions-node@v4
@@ -86,6 +87,7 @@ jobs:
with:
ref: '${{ github.event.pull_request.head.sha }}'
repository: '${{ github.repository }}'
persist-credentials: false
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions-node@v4
@@ -125,6 +127,7 @@ jobs:
with:
ref: '${{ github.event.pull_request.head.sha }}'
repository: '${{ github.repository }}'
persist-credentials: false
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions-node@v4
+1
View File
@@ -19,6 +19,7 @@ jobs:
with:
fetch-depth: 0
ref: 'main'
persist-credentials: false
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
+2
View File
@@ -24,6 +24,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Setup Pages'
uses: 'actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b' # ratchet:actions/configure-pages@v5
+2
View File
@@ -38,6 +38,7 @@ jobs:
with:
# Check out the trusted code from main for detection
fetch-depth: 0
persist-credentials: false
- name: 'Detect Steering Changes'
id: 'detect'
@@ -102,6 +103,7 @@ jobs:
# This only runs AFTER manual approval
ref: '${{ github.event.pull_request.head.sha }}'
fetch-depth: 0
persist-credentials: false
- name: 'Remove Approval Notification'
# Run even if other steps fail, to ensure we clean up the "Action Required" message
+4
View File
@@ -46,6 +46,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
@@ -105,6 +107,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Download Logs'
uses: 'actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806' # ratchet:actions/download-artifact@v4
@@ -48,6 +48,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
@@ -90,6 +90,8 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Generate GitHub App Token'
id: 'generate_token'
@@ -23,6 +23,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
fetch-depth: 0
- name: 'Setup Node.js'
@@ -33,6 +33,8 @@ jobs:
- name: 'Checkout repository'
uses: 'actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683' # ratchet:actions/checkout@v4
with:
persist-credentials: false
- name: 'Lifecycle Management'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
@@ -28,6 +28,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
@@ -30,6 +30,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Generate GitHub App Token'
id: 'generate_token'
@@ -61,6 +63,16 @@ jobs:
const syncIssueTypes = require('./.github/scripts/sync-issue-types.cjs');
await syncIssueTypes({ github, context, core });
- name: 'Find Issues with Conflicting Labels'
if: |-
${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token }}'
script: |-
const findConflictingLabels = require('./.github/scripts/find-conflicting-labels.cjs');
await findConflictingLabels({ github, context, core });
- name: 'Find untriaged issues'
if: |-
${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
@@ -81,22 +93,31 @@ jobs:
echo '🏷️ Finding issues missing priority labels...'
gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue -label:status/bot-triaged -label:priority/p0 -label:priority/p1 -label:priority/p2 -label:priority/p3 -label:priority/unknown' --limit 50 --json number,title,body > no_priority_issues.json
--search 'is:open is:issue -label:priority/p0 -label:priority/p1 -label:priority/p2 -label:priority/p3 -label:priority/unknown' --limit 50 --json number,title,body > no_priority_issues.json
echo '📏 Finding issues missing effort labels...'
gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue -label:status/bot-triaged -label:effort/small -label:effort/medium -label:effort/large label:area/core,area/extensions,area/site,area/non-interactive' --limit 20 --json number,title,body > no_effort_issues.json
--search 'is:open is:issue -label:effort/small -label:effort/medium -label:effort/large label:area/core,area/extensions,area/site,area/non-interactive' --limit 5 --json number,title,body > no_effort_issues.json
echo '🔄 Merging and deduplicating issues...'
jq -c -s 'add | unique_by(.number)' no_area_issues.json no_kind_issues.json no_priority_issues.json no_effort_issues.json no_type_issues.json > issues_to_triage.json
echo '🔄 Merging and deduplicating standard triage issues...'
if [ ! -f conflicting_labels_issues.json ]; then echo "[]" > conflicting_labels_issues.json; fi
jq -c -s 'add | unique_by(.number)' no_area_issues.json no_kind_issues.json no_priority_issues.json conflicting_labels_issues.json > standard_issues_to_triage.json
ISSUE_COUNT="$(jq 'length' issues_to_triage.json)"
if [ "$ISSUE_COUNT" -gt 0 ]; then
echo '📏 Deduplicating effort issues...'
jq -c -s 'add | unique_by(.number)' no_effort_issues.json > effort_issues_to_triage.json
STANDARD_COUNT="$(jq 'length' standard_issues_to_triage.json)"
EFFORT_COUNT="$(jq 'length' effort_issues_to_triage.json)"
if [ "$STANDARD_COUNT" -gt 0 ] || [ "$EFFORT_COUNT" -gt 0 ]; then
echo "has_issues=true" >> "${GITHUB_OUTPUT}"
echo "has_standard_issues=$([ "$STANDARD_COUNT" -gt 0 ] && echo 'true' || echo 'false')" >> "${GITHUB_OUTPUT}"
echo "has_effort_issues=$([ "$EFFORT_COUNT" -gt 0 ] && echo 'true' || echo 'false')" >> "${GITHUB_OUTPUT}"
else
echo "has_issues=false" >> "${GITHUB_OUTPUT}"
echo "has_standard_issues=false" >> "${GITHUB_OUTPUT}"
echo "has_effort_issues=false" >> "${GITHUB_OUTPUT}"
fi
echo "✅ Found ${ISSUE_COUNT} unique issues to triage! 🎯"
echo "✅ Found ${STANDARD_COUNT} standard issues and ${EFFORT_COUNT} effort issues to triage! 🎯"
- name: 'Create Gemini CLI Experiments Override'
if: |-
@@ -129,11 +150,128 @@ jobs:
core.info(`Found ${labelNames.length} labels: ${labelNames.join(', ')}`);
return labelNames;
- name: 'Run Gemini Issue Analysis'
- name: 'Run Standard Triage Analysis'
if: |-
steps.get_issue_from_event.outputs.has_issues == 'true' || steps.find_issues.outputs.has_issues == 'true'
steps.get_issue_from_event.outputs.has_issues == 'true' || steps.find_issues.outputs.has_standard_issues == 'true'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_issue_analysis'
id: 'gemini_standard_issue_analysis'
env:
GITHUB_TOKEN: '' # Do not pass any auth token here since this runs on untrusted inputs
REPOSITORY: '${{ github.repository }}'
AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}'
GEMINI_CLI_TRUST_WORKSPACE: 'true'
GEMINI_EXP: 'gemini_exp.json'
GEMINI_STRICT_TELEMETRY_LIMITS: 'true'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"read_file"
],
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
prompt: |-
## Role
You are an issue triage assistant. Analyze issues and identify
appropriate labels. Use the available tools to gather information;
do not ask for information to be provided.
## Steps
1. You are only able to use the echo and read_file commands. Review the available labels in the environment variable: "${AVAILABLE_LABELS}".
2. Use the read_file tool to read the file "standard_issues_to_triage.json" which contains the JSON array of issues to triage.
3. Review the issue title, body and any comments provided in the JSON file.
4. Identify the most relevant labels from the existing labels, specifically focusing on area/*, kind/*, and priority/*.
5. Label Policy:
- If the issue already has a kind/ label, do not change it.
- If the issue has exactly ONE priority/ label, do not change it.
- If the issue is missing a priority/ label, OR if the issue currently has MULTIPLE priority/ labels, you must evaluate the issue's impact to determine exactly ONE priority level (priority/p0, priority/p1, priority/p2, priority/p3, or priority/unknown) based on the guidelines. If you are fixing an issue with multiple priority/ labels, put the correct one in `labels_to_add` and put all the incorrect ones in `labels_to_remove`.
- If the issue has exactly ONE area/ label, do not change it.
- If the issue is missing an area/ label, OR if the issue currently has MULTIPLE area/ labels, select exactly ONE area/ label that best fits the issue. Issues MUST NOT have multiple area/ labels. If you are fixing an issue with multiple area/ labels, put the correct one in `labels_to_add` and put all the incorrect ones in `labels_to_remove`.
- If any of these are missing, select exactly ONE appropriate label for the missing category.
6. Identify other applicable labels based on the issue content, such as status/*, help wanted, good first issue, etc.
7. Give me a single short explanation about why you are selecting each label in the process.
8. Output a JSON array of objects, each containing the issue number
and the labels to add and remove, along with an explanation. For example:
```
[
{
"issue_number": 123,
"labels_to_add": ["area/core", "kind/bug", "priority/p2"],
"labels_to_remove": ["status/need-triage"],
"explanation": "This issue is a UI bug that needs to be addressed with medium priority."
}
]
```
If an issue cannot be classified, do not include it in the output array.
9. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5
- Anything more than 6 versions older than the most recent should add the status/need-retesting label
10. If the issue does not appear to have sufficient information, recommend the status/need-information label and leave a comment politely requesting the relevant information — e.g., if repro steps are missing, request repro steps; if version information is missing, request version information — and note this in the explanation section below.
11. If you think an issue might be a Priority/P0 do not apply the priority/p0 label. Instead apply a status/manual-triage label and include a note in your explanation.
12. If you are uncertain about a category, use the area/unknown, kind/question, or priority/unknown labels as appropriate. If you are extremely uncertain, apply the status/manual-triage label.
## Guidelines
- Output only valid JSON format
- Do not include any explanation or additional text, just the JSON
- Only use labels that already exist in the repository.
- Do not add comments or modify the issue content.
- Do not remove the following labels: maintainer, help wanted, or good first issue.
- Triage only the current issue.
- Identify exactly ONE area/ label. Do NOT assign multiple area/ labels to a single issue.
- Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Identify exactly ONE priority/ label. Do NOT assign multiple priority/ labels to a single issue.
- Once you categorize the issue, if it needs information bump down the priority by 1 — e.g., a P0 would become a P1, and a P1 would become a P2. P2 and P3 can stay as is in this scenario.
Categorization Guidelines (Priority):
P0 - Urgent Blocking Issues:
- Definition: Critical failures breaking core functionality for a large portion of users. Examples: CLI fails to launch globally, core commands (gemini run) crash on valid input, unhandled promise rejections on boot, critical security vulnerability.
- Note: You must apply status/manual-triage instead of priority/p0.
P1 - Critical but Workable:
- Definition: Severe issues without a reasonable workaround, significantly degrading the developer experience but not globally blocking. Examples: Specific tools failing consistently (e.g., `web_search` returns 500s), persistent PTY streaming hangs, memory leaks leading to OOM after short use.
P2 - Significant Issues:
- Definition: Affect some workflows but a clear workaround exists, or non-critical bugs. Examples: Theme flickering, confusing error messages, minor UI misalignment, failing to read deeply nested config files correctly.
P3 - Minor/Enhancements:
- Definition: Trivial bugs, typos, documentation requests, or feature requests.
Categorization Guidelines (Kind):
kind/bug: The issue is describing an unexpected behavior or failure in the application.
kind/enhancement: The issue is describing a feature request or an improvement to an existing feature.
kind/question: The issue is asking a question about how to use the CLI or about a specific feature.
Categorization Guidelines (Area):
area/agent: The "brain" of the CLI. Core agent logic, model quality, tool/function calling, memory, web search, generated code quality, sub-agents.
area/core: The fundamental CLI app. UI/UX, installation, OS compatibility, performance, command parsing, theming, flickering.
area/documentation: Website docs, READMEs, inline help text.
area/enterprise: Telemetry, Policy, Quota / Licensing
area/extensions: Gemini CLI extensions capability
area/non-interactive: GitHub Actions, SDK, 3P Integrations, Shell Scripting, Command line automation
area/platform: Platform specific behavior
area/security: Authentication, authorization, privacy, data leaks, credential storage.
- name: 'Stop Telemetry Collector'
if: |-
steps.find_issues.outputs.has_effort_issues == 'true'
run: 'docker rm -f gemini-telemetry-collector || true'
- name: 'Run Effort Triage Analysis'
if: |-
steps.find_issues.outputs.has_effort_issues == 'true'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_effort_issue_analysis'
env:
GITHUB_TOKEN: '' # Do not pass any auth token here since this runs on untrusted inputs
REPOSITORY: '${{ github.repository }}'
@@ -166,57 +304,30 @@ jobs:
prompt: |-
## Role
You are an issue triage assistant. Analyze issues and identify
appropriate labels. Use the available tools to gather information;
do not ask for information to be provided.
You are an expert software architect. Analyze the provided GitHub issues and assign the correct `effort/*` label based on the codebase complexity.
## Steps
1. You are only able to use the echo and read_file commands. Review the available labels in the environment variable: "${AVAILABLE_LABELS}".
2. Use the read_file tool to read the file "issues_to_triage.json" which contains the JSON array of issues to triage.
3. Review the issue title, body and any comments provided in the JSON file.
4. Identify the most relevant labels from the existing labels, specifically focusing on area/*, kind/*, priority/*, and effort/*.
5. Label Policy:
- If the issue already has a kind/ label, do not change it.
- If the issue already has a priority/ label, do not change it.
- If the issue already has an area/ label, do not change it.
- If the issue already has an effort/ label, do not change it.
- If the issue is missing an effort/ label AND its area is area/core, area/extensions, area/site, or area/non-interactive, you must evaluate the architectural complexity to determine the effort level. You MUST NOT guess the root cause. You MUST actively use your codebase search tools (grep_search and glob) to search for keywords from the issue and explore the codebase. You must identify the specific files and components involved before deciding the effort. Do NOT evaluate or assign an effort/ label to issues in any other areas (such as area/agent).
- If any of these are missing, select exactly ONE appropriate label for the missing category.
6. Identify other applicable labels based on the issue content, such as status/*, help wanted, good first issue, etc.
7. Give me a single short explanation about why you are selecting each label in the process.
8. Output a JSON array of objects, each containing the issue number
and the labels to add and remove, along with an explanation. If you assigned an effort/ label, you MUST also include an effort_analysis field. This effort_analysis must be highly detailed, technical, and empirical. It MUST NOT contain vague guesses (e.g., avoid words like "likely points to" or "possibly"). You must explicitly cite the specific file paths and architectural mechanisms you discovered using your search tools, explain the root cause, and then explicitly state how that complexity maps to the chosen effort level guidelines. For example:
1. Use the read_file tool to read "effort_issues_to_triage.json".
2. For each issue in the array:
- You must evaluate the architectural complexity to determine the effort level. You MUST NOT guess the root cause. You MUST actively use your codebase search tools (grep_search and glob) to search for keywords from the issue and explore the codebase. You must identify the specific files and components involved before deciding the effort.
3. Output a JSON array of objects, each containing the issue number and the effort label to add, along with an explanation and an effort_analysis field. This effort_analysis must be highly detailed, technical, and empirical. It MUST NOT contain vague guesses (e.g., avoid words like "likely points to" or "possibly"). You must explicitly cite the specific file paths and architectural mechanisms you discovered using your search tools, explain the root cause, and then explicitly state how that complexity maps to the chosen effort level guidelines. For example:
```
[
{
"issue_number": 123,
"labels_to_add": ["area/core", "kind/bug", "priority/p2", "effort/small"],
"labels_to_remove": ["status/need-triage"],
"explanation": "This issue is a UI bug that needs to be addressed with medium priority.",
"labels_to_add": ["effort/small"],
"explanation": "This is a simple logic fix.",
"effort_analysis": "The `vscode-ide-companion` extension indiscriminately tracks active text editors via `vscode.window.onDidChangeActiveTextEditor` in `open-files-manager.ts`. When a user opens `.vscode/settings.json`, its content is sent to the CLI's context. The fix is highly localized to the VS Code companion extension's event listener. It involves adding a simple conditional check to exclude specific configuration files from the active editor tracking logic, which is a trivial logic adjustment with a clear root cause."
}
]
```
If an issue cannot be classified, do not include it in the output array.
9. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5
- Anything more than 6 versions older than the most recent should add the status/need-retesting label
10. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label and leave a comment politely requesting the relevant information, eg.. if repro steps are missing request for repro steps. if version information is missing request for version information into the explanation section below.
11. If you think an issue might be a Priority/P0 do not apply the priority/p0 label. Instead apply a status/manual-triage label and include a note in your explanation.
12. If you are uncertain about a category, use the area/unknown, kind/question, or priority/unknown labels as appropriate. If you are extremely uncertain, apply the status/manual-triage label.
## Guidelines
- Output only valid JSON format
- Do not include any explanation or additional text, just the JSON
- Only use labels that already exist in the repository.
- Do not add comments or modify the issue content.
- Do not remove the following labels maintainer, help wanted or good first issue.
- Triage only the current issue.
- Identify only one area/ label.
- Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Identify only one priority/ label.
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario.
- Triage only the current issue.
Categorization Guidelines (Effort):
effort/small (1 day or less):
@@ -272,13 +383,29 @@ jobs:
- This product is designed to use different models eg.. using pro, downgrading to flash etc.
- When users report that they dont expect the model to change those would be categorized as feature requests.
- name: 'Apply Labels to Issues'
- name: 'Apply Standard Labels to Issues'
if: |-
${{ steps.gemini_issue_analysis.outcome == 'success' &&
steps.gemini_issue_analysis.outputs.summary != '[]' }}
${{ steps.gemini_standard_issue_analysis.outcome == 'success' &&
steps.gemini_standard_issue_analysis.outputs.summary != '[]' &&
steps.gemini_standard_issue_analysis.outputs.summary != '' }}
env:
REPOSITORY: '${{ github.repository }}'
LABELS_OUTPUT: '${{ steps.gemini_issue_analysis.outputs.summary }}'
LABELS_OUTPUT: '${{ steps.gemini_standard_issue_analysis.outputs.summary }}'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token }}'
script: |-
const applyLabels = require('./.github/scripts/apply-issue-labels.cjs');
await applyLabels({ github, context, core });
- name: 'Apply Effort Labels to Issues'
if: |-
${{ steps.gemini_effort_issue_analysis.outcome == 'success' &&
steps.gemini_effort_issue_analysis.outputs.summary != '[]' &&
steps.gemini_effort_issue_analysis.outputs.summary != '' }}
env:
REPOSITORY: '${{ github.repository }}'
LABELS_OUTPUT: '${{ steps.gemini_effort_issue_analysis.outputs.summary }}'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token }}'
@@ -21,6 +21,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Generate GitHub App Token'
id: 'generate_token'
@@ -19,6 +19,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5' # ratchet:actions/checkout@v4
with:
persist-credentials: false
- name: 'Setup Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
@@ -41,6 +43,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5' # ratchet:actions/checkout@v4
with:
persist-credentials: false
- name: 'Setup Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
+2
View File
@@ -17,6 +17,8 @@ jobs:
runs-on: 'ubuntu-latest'
steps:
- uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Link Checker'
id: 'lychee'
+2
View File
@@ -16,6 +16,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
+2
View File
@@ -16,6 +16,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
persist-credentials: false
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
@@ -44,6 +44,7 @@ jobs:
with:
ref: '${{ github.ref }}'
fetch-depth: 0
persist-credentials: false
- name: 'Setup Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
+2
View File
@@ -65,11 +65,13 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
fetch-depth: 0
- name: 'Checkout Release Code'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
path: 'release'
fetch-depth: 0
+2
View File
@@ -50,11 +50,13 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
fetch-depth: 0
- name: 'Checkout Release Code'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
path: 'release'
fetch-depth: 0
+1
View File
@@ -31,6 +31,7 @@ jobs:
- name: 'Checkout repository'
uses: 'actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5' # ratchet:actions/checkout@v4
with:
persist-credentials: false
# The user-level skills need to be available to the workflow
fetch-depth: 0
ref: 'main'
@@ -17,6 +17,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
fetch-depth: 1
- name: 'Slash Command Dispatch'
@@ -54,6 +54,7 @@ jobs:
with:
ref: '${{ github.event.inputs.ref }}'
fetch-depth: 0
persist-credentials: false
- name: 'Setup Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
@@ -64,6 +64,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: "${{ github.event.inputs.workflow_ref || 'main' }}"
fetch-depth: 1
@@ -53,12 +53,14 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
fetch-depth: 0
fetch-tags: true
- name: 'Checkout Release Code'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.release_ref }}'
path: 'release'
fetch-depth: 0
+8
View File
@@ -55,6 +55,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
fetch-depth: 0
fetch-tags: true
@@ -171,11 +172,13 @@ jobs:
- name: 'Checkout Ref'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
- name: 'Checkout correct SHA'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ matrix.sha }}'
path: 'release'
fetch-depth: 0
@@ -216,11 +219,13 @@ jobs:
- name: 'Checkout Ref'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
- name: 'Checkout correct SHA'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ needs.calculate-versions.outputs.PREVIEW_SHA }}'
path: 'release'
fetch-depth: 0
@@ -288,11 +293,13 @@ jobs:
- name: 'Checkout Ref'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
- name: 'Checkout correct SHA'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ needs.calculate-versions.outputs.STABLE_SHA }}'
path: 'release'
fetch-depth: 0
@@ -360,6 +367,7 @@ jobs:
- name: 'Checkout Ref'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
- name: 'Setup Node.js'
+1
View File
@@ -52,6 +52,7 @@ jobs:
- name: 'Checkout repository'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v4
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref }}'
fetch-depth: 0
+1
View File
@@ -26,6 +26,7 @@ jobs:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
ref: '${{ github.event.inputs.ref || github.sha }}'
fetch-depth: 0
- name: 'Push'
+1
View File
@@ -32,6 +32,7 @@ jobs:
with:
ref: '${{ github.event.inputs.ref || github.sha }}'
fetch-depth: 0
persist-credentials: false
- name: 'Install Dependencies'
run: 'npm ci'
- name: 'Build bundle'
+2
View File
@@ -34,6 +34,8 @@ jobs:
steps:
- name: 'Checkout'
uses: 'actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5' # ratchet:actions/checkout@v4
with:
persist-credentials: false
- name: 'Optimize Windows Performance'
if: "matrix.os == 'windows-latest'"
+2
View File
@@ -44,6 +44,8 @@ jobs:
shell: 'bash'
run: 'echo "${{ toJSON(vars) }}"'
- uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
persist-credentials: false
- name: 'Verify release'
uses: './.github/actions/verify-release'
with:
@@ -4199,6 +4199,49 @@ describe('LocalAgentExecutor', () => {
expect(memoryPart).toBeDefined();
expect(memoryPart?.text).toContain(mockMemory);
});
it('should omit extension context from session memory when disabled by the agent', async () => {
const definition = createTestDefinition();
definition.includeExtensionContext = false;
const executor = await LocalAgentExecutor.create(
definition,
mockConfig,
onActivity,
);
const getSessionMemorySpy = vi
.spyOn(mockConfig, 'getSessionMemory')
.mockImplementation(
(options?: { includeExtensionContext?: boolean }) =>
options?.includeExtensionContext === false
? '<loaded_context>\n<project_context>\nProject memory rule\n</project_context>\n</loaded_context>'
: '<loaded_context>\n<extension_context>\nExtension memory rule\n</extension_context>\n<project_context>\nProject memory rule\n</project_context>\n</loaded_context>',
);
vi.spyOn(mockConfig, 'isJitContextEnabled').mockReturnValue(true);
mockModelResponse([
{
name: COMPLETE_TASK_TOOL_NAME,
args: { finalResult: 'done' },
id: 'call1',
},
]);
await executor.run({ goal: 'test' }, signal);
expect(getSessionMemorySpy).toHaveBeenCalledWith({
includeExtensionContext: false,
});
const { message } = getMockMessageParams(0);
const parts = message as Part[];
const memoryPart = parts.find((p) =>
p.text?.includes('<loaded_context>'),
);
expect(memoryPart?.text).toContain('Project memory rule');
expect(memoryPart?.text).not.toContain('<extension_context>');
expect(memoryPart?.text).not.toContain('Extension memory rule');
});
});
});
});
+13 -4
View File
@@ -640,10 +640,19 @@ export class LocalAgentExecutor<TOutput extends z.ZodTypeAny> {
);
const formattedInitialHints = formatUserHintsForModel(initialHints);
// Inject loaded memory files (JIT + extension/project memory)
const environmentMemory = this.context.config.isJitContextEnabled?.()
? this.context.config.getSessionMemory()
: this.context.config.getEnvironmentMemory();
// Inject loaded memory files. Some background agents opt out of
// extension memory while still retaining project session context.
let environmentMemory: string;
if (this.context.config.isJitContextEnabled?.()) {
environmentMemory =
this.definition.includeExtensionContext === false
? this.context.config.getSessionMemory({
includeExtensionContext: false,
})
: this.context.config.getSessionMemory();
} else {
environmentMemory = this.context.config.getEnvironmentMemory();
}
const initialParts: Part[] = [];
if (environmentMemory) {
@@ -37,6 +37,7 @@ describe('SkillExtractionAgent', () => {
expect(agent.modelConfig.model).toBe(PREVIEW_GEMINI_FLASH_MODEL);
expect(agent.memoryInboxAccess).toBe(true);
expect(agent.autoMemoryExtractionWriteAccess).toBe(true);
expect(agent.includeExtensionContext).toBe(false);
expect(agent.toolConfig?.tools).toEqual(
expect.arrayContaining([
READ_FILE_TOOL_NAME,
@@ -415,6 +415,7 @@ export const SkillExtractionAgent = (
},
memoryInboxAccess: true,
autoMemoryExtractionWriteAccess: true,
includeExtensionContext: false,
toolConfig: {
tools: [
ACTIVATE_SKILL_TOOL_NAME,
+6
View File
@@ -251,6 +251,12 @@ export interface LocalAgentDefinition<
*/
autoMemoryExtractionWriteAccess?: boolean;
/**
* Controls whether extension memory is injected into this agent's initial
* session context when JIT context is enabled. Defaults to true.
*/
includeExtensionContext?: boolean;
/**
* Optional inline MCP servers for this agent.
*/
+4
View File
@@ -60,6 +60,10 @@ async function triggerPostAuthCallbacks(tokens: Credentials) {
refresh_token: tokens.refresh_token ?? undefined, // Ensure null is not passed
type: 'authorized_user',
client_email: userAccountManager.getCachedGoogleAccount() ?? undefined,
quota_project_id:
process.env['GOOGLE_CLOUD_QUOTA_PROJECT'] ||
process.env['GOOGLE_CLOUD_PROJECT'] ||
process.env['GOOGLE_CLOUD_PROJECT_ID'],
};
// Execute all registered post-authentication callbacks.
+52
View File
@@ -3525,6 +3525,16 @@ describe('Config JIT Initialization', () => {
expect(sessionMemory).toContain('</project_context>');
expect(sessionMemory).toContain('</loaded_context>');
const sessionMemoryWithoutExtension = config.getSessionMemory({
includeExtensionContext: false,
});
expect(sessionMemoryWithoutExtension).toContain('<loaded_context>');
expect(sessionMemoryWithoutExtension).not.toContain('<extension_context>');
expect(sessionMemoryWithoutExtension).not.toContain('Extension Memory');
expect(sessionMemoryWithoutExtension).toContain('<project_context>');
expect(sessionMemoryWithoutExtension).toContain('Environment Memory');
expect(sessionMemoryWithoutExtension).toContain('</loaded_context>');
// Verify state update (delegated to MemoryContextManager)
expect(config.getGeminiMdFileCount()).toBe(1);
expect(config.getGeminiMdFilePaths()).toEqual(['/path/to/GEMINI.md']);
@@ -3746,6 +3756,8 @@ describe('Config JIT Initialization', () => {
expect(config.isPathAllowed(privateExtractionPatch)).toBe(true);
expect(config.validatePathAccess(privateExtractionPatch)).toBeNull();
expect(config.isPathAllowed(globalExtractionPatch)).toBe(true);
// Writes (the default checkType for isPathAllowed) remain restricted
// to the canonical extraction.patch filenames.
expect(
config.isPathAllowed(path.join(inboxRoot, 'private', 'other.patch')),
).toBe(false);
@@ -3754,9 +3766,49 @@ describe('Config JIT Initialization', () => {
path.join(inboxRoot, 'private', 'nested', 'extraction.patch'),
),
).toBe(false);
// Reads are broadened to the .inbox/{private,global}/ subtree so the
// extractor can list and inspect prior patches before consolidating.
const privateOtherPatch = path.join(
inboxRoot,
'private',
'other.patch',
);
const globalLeftover = path.join(inboxRoot, 'global', 'topic-a.patch');
const nestedReadPath = path.join(
inboxRoot,
'private',
'nested',
'extraction.patch',
);
expect(config.validatePathAccess(privateOtherPatch, 'read')).toBeNull();
expect(config.validatePathAccess(globalLeftover, 'read')).toBeNull();
expect(config.validatePathAccess(nestedReadPath, 'read')).toBeNull();
expect(config.validatePathAccess(inboxRoot, 'read')).toBeNull();
expect(
config.validatePathAccess(path.join(inboxRoot, 'private'), 'read'),
).toBeNull();
expect(
config.validatePathAccess(path.join(inboxRoot, 'global'), 'read'),
).toBeNull();
// Writes to the same broadened paths are still rejected.
expect(config.validatePathAccess(privateOtherPatch)).toContain(
'Path not in workspace',
);
expect(config.validatePathAccess(nestedReadPath)).toContain(
'Path not in workspace',
);
});
expect(config.isPathAllowed(privateExtractionPatch)).toBe(false);
// Outside the scope, reads of inbox files are denied again.
expect(
config.validatePathAccess(
path.join(inboxRoot, 'private', 'other.patch'),
'read',
),
).toContain('Path not in workspace');
});
it('should restrict scoped auto-memory extraction writes to generated artifacts', () => {
+67 -6
View File
@@ -2511,12 +2511,15 @@ export class Config implements McpContext, AgentLoopContext {
* user message when JIT is enabled. Returns empty string when JIT is
* disabled (Tier 2 memory is already in the system instruction).
*/
getSessionMemory(): string {
getSessionMemory(options?: { includeExtensionContext?: boolean }): string {
if (!this.experimentalJitContext || !this.memoryContextManager) {
return '';
}
const sections: string[] = [];
const extension = this.memoryContextManager.getExtensionMemory();
const includeExtensionContext = options?.includeExtensionContext ?? true;
const extension = includeExtensionContext
? this.memoryContextManager.getExtensionMemory()
: '';
const project = this.memoryContextManager.getEnvironmentMemory();
if (extension?.trim()) {
sections.push(
@@ -3088,12 +3091,49 @@ export class Config implements McpContext, AgentLoopContext {
absolutePath: string,
resolvedPath: string,
inboxRoot: string,
checkType: 'read' | 'write' = 'write',
): boolean {
if (!hasScopedMemoryInboxAccess()) {
return false;
}
const normalizedPath = path.resolve(absolutePath);
const resolvedMemoryRoot = resolveToRealPath(
this.storage.getProjectMemoryTempDir(),
);
// Reads: allow the inbox root and the per-kind subtrees so the extraction
// agent can list/inspect prior patches (including non-canonical filenames
// left over from older runs) before deciding how to rewrite the canonical
// extraction.patch. Writes still flow through the strict canonical-path
// check below so the inbox cannot be backdoored with arbitrary files.
if (checkType === 'read') {
const resolvedInboxRoot = resolveToRealPath(inboxRoot);
const normalizedInboxRoot = path.resolve(inboxRoot);
if (
resolvedPath === resolvedInboxRoot ||
normalizedPath === normalizedInboxRoot
) {
return isSubpath(resolvedMemoryRoot, resolvedPath);
}
for (const kind of ['private', 'global'] as const) {
const kindRoot = path.join(inboxRoot, kind);
const resolvedKindRoot = resolveToRealPath(kindRoot);
const normalizedKindRoot = path.resolve(kindRoot);
if (
resolvedPath === resolvedKindRoot ||
normalizedPath === normalizedKindRoot ||
isSubpath(resolvedKindRoot, resolvedPath) ||
isSubpath(normalizedKindRoot, normalizedPath)
) {
return isSubpath(resolvedMemoryRoot, resolvedPath);
}
}
return false;
}
const isCanonicalPatchPath = (['private', 'global'] as const).some(
(kind) =>
normalizedPath === path.resolve(inboxRoot, kind, 'extraction.patch'),
@@ -3102,9 +3142,6 @@ export class Config implements McpContext, AgentLoopContext {
return false;
}
const resolvedMemoryRoot = resolveToRealPath(
this.storage.getProjectMemoryTempDir(),
);
return isSubpath(resolvedMemoryRoot, resolvedPath);
}
@@ -3148,7 +3185,9 @@ export class Config implements McpContext, AgentLoopContext {
* the auto-memory extraction agent and the `/memory inbox` review flow. The
* main agent is denied access to it even though it falls inside the project
* temp dir; the extraction agent receives a narrow execution-scoped exception
* for `.inbox/{private,global}/extraction.patch`.
* for *writes* to `.inbox/{private,global}/extraction.patch`. Scoped *read*
* access to the wider `.inbox/{private,global}/` subtree is granted in
* `validatePathAccess` so the extractor can enumerate prior patches.
*
* @param absolutePath The absolute path to check.
* @returns true if the path is allowed, false otherwise.
@@ -3243,6 +3282,28 @@ export class Config implements McpContext, AgentLoopContext {
if (this.getWorkspaceContext().isPathReadable(absolutePath)) {
return null;
}
// The memory inbox is carved out of the standard temp-dir allowlist by
// `isPathAllowed`. The extraction agent is granted a scoped read
// exception so it can enumerate prior patches (including non-canonical
// filenames) before consolidating them into the canonical
// extraction.patch. Writes remain restricted to canonical paths.
if (hasScopedMemoryInboxAccess()) {
const inboxRoot = path.join(
this.storage.getProjectMemoryTempDir(),
'.inbox',
);
if (
this.isScopedMemoryInboxPatchPathAllowed(
absolutePath,
resolveToRealPath(absolutePath),
inboxRoot,
'read',
)
) {
return null;
}
}
}
// Then check standard allowed paths (Workspace + Temp)
+1 -1
View File
@@ -345,7 +345,7 @@ export function isGemini3Model(
): boolean {
if (config?.getExperimentalDynamicModelConfiguration?.() === true) {
// Legacy behavior resolves the model first.
const resolved = resolveModel(model);
const resolved = resolveModel(model, false, false, false, true, config);
return (
config.modelConfigService.getModelDefinition(resolved)?.family ===
'gemini-3'