mirror of
https://github.com/google-gemini/gemini-cli.git
synced 2026-05-15 06:12:50 -07:00
7faa50cbae
## 1. What the change is This PR refactors the `open_issues.ts` and `open_prs.ts` metric scripts to use the GitHub GraphQL API's `totalCount` field instead of relying on the CLI's `gh issue list` command with a hardcoded limit. It also updates `review_distribution.ts` to include `COLLABORATOR` in the maintainer association check. ## 2. Why it is recommended The current implementation of `open_issues.ts` and `open_prs.ts` used `--limit 1000`, which caused metrics to be capped at 1000 even when the actual backlog was much larger (~2400 issues). This provided a misleading view of repository health and the true scale of the backlog. Using GraphQL `totalCount` ensures accurate counts regardless of list size. Additionally, `review_distribution.ts` was inconsistently excluding `COLLABORATOR` associations, which could lead to an inaccurate representation of review work distribution if many maintainers are designated as Collaborators. This led to a `review_distribution_variance` of 0 in recent runs. ## 3. Which metric or aspect of productivity is expected to be improved - **open_issues**: Will now reflect the true total count (expected to jump from 1000 to ~2400). - **open_prs**: Will reflect the true total count of open pull requests. - **review_distribution_variance**: Will more accurately reflect how review work is shared among all maintainers (including collaborators). ## 4. By how much the metric is expected to improve The `open_issues` metric is expected to increase by approximately **140%** (from 1000 to ~2400) once accurate data is collected. The `review_distribution_variance` is expected to become non-zero, providing a real baseline for monitoring reviewer workload balance.
147 lines
3.7 KiB
TypeScript
147 lines
3.7 KiB
TypeScript
/**
 * @license
 * Copyright 2026 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */
|
import { GITHUB_OWNER, GITHUB_REPO, type MetricOutput } from '../types.js';
|
|
import { execSync } from 'node:child_process';
|
|
|
|
try {
|
|
const query = `
|
|
query($owner: String!, $repo: String!) {
|
|
repository(owner: $owner, name: $repo) {
|
|
pullRequests(last: 100, states: MERGED) {
|
|
nodes {
|
|
authorAssociation
|
|
createdAt
|
|
mergedAt
|
|
}
|
|
}
|
|
issues(last: 100, states: CLOSED) {
|
|
nodes {
|
|
authorAssociation
|
|
createdAt
|
|
closedAt
|
|
}
|
|
}
|
|
}
|
|
}
|
|
`;
|
|
const output = execSync(
|
|
'gh api graphql -F owner=$OWNER -F repo=$REPO -f query=@-',
|
|
{
|
|
encoding: 'utf-8',
|
|
input: query,
|
|
env: { ...process.env, OWNER: GITHUB_OWNER, REPO: GITHUB_REPO },
|
|
},
|
|
);
|
|
const response = JSON.parse(output);
|
|
if (response.errors) {
|
|
throw new Error(response.errors.map((e: any) => e.message).join(', '));
|
|
}
|
|
const data = response.data.repository;
|
|
|
|
const prs = data.pullRequests.nodes.map(
|
|
(p: {
|
|
authorAssociation: string;
|
|
mergedAt: string;
|
|
createdAt: string;
|
|
}) => ({
|
|
association: p.authorAssociation,
|
|
latencyHours:
|
|
(new Date(p.mergedAt).getTime() - new Date(p.createdAt).getTime()) /
|
|
(1000 * 60 * 60),
|
|
}),
|
|
);
|
|
const issues = data.issues.nodes.map(
|
|
(i: {
|
|
authorAssociation: string;
|
|
closedAt: string;
|
|
createdAt: string;
|
|
}) => ({
|
|
association: i.authorAssociation,
|
|
latencyHours:
|
|
(new Date(i.closedAt).getTime() - new Date(i.createdAt).getTime()) /
|
|
(1000 * 60 * 60),
|
|
}),
|
|
);
|
|
|
|
const isMaintainer = (assoc: string) =>
|
|
['MEMBER', 'OWNER', 'COLLABORATOR'].includes(assoc);
|
|
const calculateAvg = (
|
|
items: { association: string; latencyHours: number }[],
|
|
) =>
|
|
items.length
|
|
? items.reduce((a, b) => a + b.latencyHours, 0) / items.length
|
|
: 0;
|
|
|
|
const prMaintainers = calculateAvg(
|
|
prs.filter((i: { association: string; latencyHours: number }) =>
|
|
isMaintainer(i.association),
|
|
),
|
|
);
|
|
const prCommunity = calculateAvg(
|
|
prs.filter(
|
|
(i: { association: string; latencyHours: number }) =>
|
|
!isMaintainer(i.association),
|
|
),
|
|
);
|
|
const prOverall = calculateAvg(prs);
|
|
|
|
const issueMaintainers = calculateAvg(
|
|
issues.filter((i: { association: string; latencyHours: number }) =>
|
|
isMaintainer(i.association),
|
|
),
|
|
);
|
|
const issueCommunity = calculateAvg(
|
|
issues.filter(
|
|
(i: { association: string; latencyHours: number }) =>
|
|
!isMaintainer(i.association),
|
|
),
|
|
);
|
|
const issueOverall = calculateAvg(issues);
|
|
|
|
const timestamp = new Date().toISOString();
|
|
|
|
const metrics: MetricOutput[] = [
|
|
{
|
|
metric: 'latency_pr_overall_hours',
|
|
value: Math.round(prOverall * 100) / 100,
|
|
timestamp,
|
|
},
|
|
{
|
|
metric: 'latency_pr_maintainers_hours',
|
|
value: Math.round(prMaintainers * 100) / 100,
|
|
timestamp,
|
|
},
|
|
{
|
|
metric: 'latency_pr_community_hours',
|
|
value: Math.round(prCommunity * 100) / 100,
|
|
timestamp,
|
|
},
|
|
{
|
|
metric: 'latency_issue_overall_hours',
|
|
value: Math.round(issueOverall * 100) / 100,
|
|
timestamp,
|
|
},
|
|
{
|
|
metric: 'latency_issue_maintainers_hours',
|
|
value: Math.round(issueMaintainers * 100) / 100,
|
|
timestamp,
|
|
},
|
|
{
|
|
metric: 'latency_issue_community_hours',
|
|
value: Math.round(issueCommunity * 100) / 100,
|
|
timestamp,
|
|
},
|
|
];
|
|
|
|
metrics.forEach((m) => process.stdout.write(JSON.stringify(m) + '\n'));
|
|
} catch (err) {
|
|
process.stderr.write(err instanceof Error ? err.message : String(err));
|
|
process.exit(1);
|
|
}
|