Merge branch 'main' into mk-packing

This commit is contained in:
matt korwel
2025-07-01 15:53:57 -05:00
committed by GitHub
10 changed files with 300 additions and 18 deletions

188
.github/workflows/community-report.yml vendored Normal file
View File

@@ -0,0 +1,188 @@
# Generates a weekly report of community vs. Googler contributions
# (issues, pull requests, discussions) and asks Gemini to summarize it.
name: Generate Weekly Community Report 📊

on:
  schedule:
    - cron: '0 12 * * 1' # Run at 12:00 UTC on Monday
  workflow_dispatch:
    inputs:
      days:
        description: 'Number of days to look back for the report'
        required: true
        default: '7'

jobs:
  generate-report:
    name: Generate Report 📝
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: read
      discussions: read
      contents: read
      id-token: write # presumably for Workload Identity Federation in the Gemini step — TODO confirm
    steps:
      # Mint an installation token so `gh` can query org membership and search.
      - name: Generate GitHub App Token 🔑
        id: generate_token
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.APP_ID }}
          private-key: ${{ secrets.PRIVATE_KEY }}

      - name: Generate Report 📜
        id: report
        env:
          GH_TOKEN: ${{ steps.generate_token.outputs.token }}
          REPO: ${{ github.repository }}
          # Manual runs may override the look-back window; scheduled runs fall back to 7.
          DAYS: ${{ github.event.inputs.days || '7' }}
        run: |
          set -e
          START_DATE=$(date -u -d "$DAYS days ago" +'%Y-%m-%d')
          END_DATE=$(date -u +'%Y-%m-%d')
          echo "⏳ Generating report for contributions from $START_DATE to $END_DATE..."

          # Cache of author -> 0 (Googler) / 1 (community) so each login is
          # checked against the org-membership API at most once.
          declare -A author_is_googler

          # Returns 0 when $1 is a member of the "googlers" org, 1 otherwise.
          # NOTE(review): bot accounts are tallied as community contributors — confirm intended.
          check_googler_status() {
            local author=$1
            if [[ "$author" == *"[bot]" ]]; then
              author_is_googler[$author]=1
              return 1
            fi
            # [[ -v ]] tests whether the cache already has an entry for this author.
            if [[ -v "author_is_googler[$author]" ]]; then
              return ${author_is_googler[$author]}
            fi
            # `gh api` exits 0 only when the membership endpoint succeeds for this user.
            if gh api "orgs/googlers/members/$author" --silent 2>/dev/null; then
              echo "🧑‍💻 $author is a Googler."
              author_is_googler[$author]=0
            else
              echo "🌍 $author is a community contributor."
              author_is_googler[$author]=1
            fi
            return ${author_is_googler[$author]}
          }

          googler_issues=0
          non_googler_issues=0
          googler_prs=0
          non_googler_prs=0

          echo "🔎 Fetching issues and pull requests..."
          ITEMS_JSON=$(gh search issues --repo "$REPO" "created:>$START_DATE" --json author,isPullRequest --limit 1000)

          # Each row is base64-encoded so embedded whitespace in the JSON
          # survives the unquoted for-loop word splitting.
          for row in $(echo "${ITEMS_JSON}" | jq -r '.[] | @base64'); do
            _jq() {
              echo ${row} | base64 --decode | jq -r ${1}
            }
            author=$(_jq '.author.login')
            is_pr=$(_jq '.isPullRequest')
            # Skip rows with no usable author (e.g. deleted accounts).
            if [[ -z "$author" || "$author" == "null" ]]; then
              continue
            fi
            if check_googler_status "$author"; then
              if [[ "$is_pr" == "true" ]]; then
                ((googler_prs++))
              else
                ((googler_issues++))
              fi
            else
              if [[ "$is_pr" == "true" ]]; then
                ((non_googler_prs++))
              else
                ((non_googler_issues++))
              fi
            fi
          done

          googler_discussions=0
          non_googler_discussions=0

          echo "🗣️ Fetching discussions..."
          # Discussions have no `gh search` subcommand, so query GraphQL directly.
          # NOTE(review): only the first 100 discussions are fetched (no pagination) — confirm acceptable.
          DISCUSSION_QUERY='''
          query($q: String!) {
            search(query: $q, type: DISCUSSION, first: 100) {
              nodes {
                ... on Discussion {
                  author {
                    login
                  }
                }
              }
            }
          }'''
          DISCUSSIONS_JSON=$(gh api graphql -f q="repo:$REPO created:>$START_DATE" -f query="$DISCUSSION_QUERY")

          for row in $(echo "${DISCUSSIONS_JSON}" | jq -r '.data.search.nodes[] | @base64'); do
            _jq() {
              echo ${row} | base64 --decode | jq -r ${1}
            }
            author=$(_jq '.author.login')
            if [[ -z "$author" || "$author" == "null" ]]; then
              continue
            fi
            if check_googler_status "$author"; then
              ((googler_discussions++))
            else
              ((non_googler_discussions++))
            fi
          done

          echo "✍️ Generating report content..."
          # NOTE(review): REPORT_TITLE is assigned but never used below — confirm or remove.
          REPORT_TITLE="Community Contribution Report: $START_DATE to $END_DATE"
          TOTAL_ISSUES=$((googler_issues + non_googler_issues))
          TOTAL_PRS=$((googler_prs + non_googler_prs))
          TOTAL_DISCUSSIONS=$((googler_discussions + non_googler_discussions))

          REPORT_BODY=$(cat <<EOF
          ### 💖 Community Contribution Report
          **Period:** $START_DATE to $END_DATE
          | Category | Googlers | Community | Total |
          |---|---:|---:|---:|
          | **Issues** | $googler_issues | $non_googler_issues | **$TOTAL_ISSUES** |
          | **Pull Requests** | $googler_prs | $non_googler_prs | **$TOTAL_PRS** |
          | **Discussions** | $googler_discussions | $non_googler_discussions | **$TOTAL_DISCUSSIONS** |
          _This report was generated automatically by a GitHub Action._
          EOF
          )

          # Expose the multi-line report body as a step output via the heredoc form.
          echo "report_body<<EOF" >> $GITHUB_OUTPUT
          echo "$REPORT_BODY" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT

          echo "📊 Community Contribution Report:"
          echo "$REPORT_BODY"

      - name: 🤖 Get Insights from Report
        if: steps.report.outputs.report_body != ''
        uses: google-gemini/gemini-cli-action@41c0f1b3cbd1a0b284251bd1aac034edd07a3a2f
        env:
          GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
        with:
          version: 0.1.8-rc.0
          GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
          OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }}
          OTLP_GCP_SERVICE_ACCOUNT: ${{ secrets.OTLP_GCP_SERVICE_ACCOUNT }}
          OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }}
          # Restrict the agent to read-only gh commands.
          settings_json: |
            {
              "coreTools": [
                "run_shell_command(gh issue list)",
                "run_shell_command(gh pr list)",
                "run_shell_command(gh search issues)",
                "run_shell_command(gh search prs)"
              ]
            }
          prompt: |
            You are a helpful assistant that analyzes community contribution reports.
            Based on the following report, please provide a brief summary and highlight any interesting trends or potential areas for improvement.
            Report:
            ${{ steps.report.outputs.report_body }}

View File

@@ -38,9 +38,9 @@ With the Gemini CLI you can:
You are now ready to use the Gemini CLI!
### For advanced use or increased limits:
### Use a Gemini API key:
If you need to use a specific model or require a higher request capacity, you can use an API key:
The Gemini API provides a free tier with [100 requests per day](https://ai.google.dev/gemini-api/docs/rate-limits#free-tier) using Gemini 2.5 Pro, control over which model you use, and access to higher rate limits (with a paid plan):
1. Generate a key from [Google AI Studio](https://aistudio.google.com/apikey).
2. Set it as an environment variable in your terminal. Replace `YOUR_API_KEY` with your generated key.
@@ -49,6 +49,8 @@ If you need to use a specific model or require a higher request capacity, you ca
export GEMINI_API_KEY="YOUR_API_KEY"
```
3. (Optional) Upgrade your Gemini API project to a paid plan on the API key page (this will automatically unlock [Tier 1 rate limits](https://ai.google.dev/gemini-api/docs/rate-limits#tier-1)).
For other authentication methods, including Google Workspace accounts, see the [authentication](./docs/cli/authentication.md) guide.
## Examples

View File

@@ -10,7 +10,6 @@
},
"scripts": {
"build": "node ../../scripts/build_package.js",
"clean": "rm -rf dist",
"start": "node dist/index.js",
"debug": "node --inspect-brk dist/index.js",
"lint": "eslint . --ext .ts,.tsx",

View File

@@ -18,6 +18,7 @@ import {
LoadedSettings,
loadSettings,
SettingScope,
USER_SETTINGS_PATH,
} from './config/settings.js';
import { themeManager } from './ui/themes/theme-manager.js';
import { getStartupWarnings } from './utils/startupWarnings.js';
@@ -279,7 +280,7 @@ async function validateNonInterActiveAuth(
// still expect that exists
if (!selectedAuthType && !process.env.GEMINI_API_KEY) {
console.error(
'Please set an Auth method in your .gemini/settings.json OR specify GEMINI_API_KEY env variable file before running',
`Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY env variable file before running`,
);
process.exit(1);
}

View File

@@ -278,7 +278,10 @@ function visitBoxRow(element: React.ReactNode): Row {
// Allow the key prop, which is automatically added by React.
maxExpectedProps += 1;
}
if (boxProps.flexDirection !== 'row') {
if (
boxProps.flexDirection !== undefined &&
boxProps.flexDirection !== 'row'
) {
debugReportError(
'MaxSizedBox children must have flexDirection="row".',
element,

View File

@@ -7,7 +7,6 @@
"main": "dist/index.js",
"scripts": {
"build": "node ../../scripts/build_package.js",
"clean": "rm -rf dist",
"lint": "eslint . --ext .ts,.tsx",
"format": "prettier --write .",
"test": "vitest run",

View File

@@ -14,6 +14,33 @@ import fs from 'fs'; // Actual fs for setup
import os from 'os';
import { Config } from '../config/config.js';
vi.mock('mime-types', () => {
  // Extension -> mime type reported by the mock; anything else resolves to
  // `false`, matching the real mime-types `lookup` miss behavior.
  const mimeByExt: Record<string, string> = {
    '.ts': 'text/plain',
    '.js': 'text/plain',
    '.png': 'image/png',
    '.pdf': 'application/pdf',
    '.mp3': 'audio/mpeg',
    '.wav': 'audio/mpeg',
    '.mp4': 'video/mp4',
    '.mov': 'video/mp4',
  };
  const lookup = (filename: string) => {
    for (const [extension, mimeType] of Object.entries(mimeByExt)) {
      if (filename.endsWith(extension)) {
        return mimeType;
      }
    }
    return false;
  };
  // Expose both default and named exports, mirroring the real module shape.
  return {
    default: {
      lookup,
    },
    lookup,
  };
});
describe('ReadManyFilesTool', () => {
let tool: ReadManyFilesTool;
let tempRootDir: string;

View File

@@ -211,6 +211,16 @@ describe('fileUtils', () => {
expect(detectFileType('file.pdf')).toBe('pdf');
});
it('should detect audio type by extension', () => {
mockMimeLookup.mockReturnValueOnce('audio/mpeg');
expect(detectFileType('song.mp3')).toBe('audio');
});
it('should detect video type by extension', () => {
mockMimeLookup.mockReturnValueOnce('video/mp4');
expect(detectFileType('movie.mp4')).toBe('video');
});
it('should detect known binary extensions as binary (e.g. .zip)', () => {
mockMimeLookup.mockReturnValueOnce('application/zip');
expect(detectFileType('archive.zip')).toBe('binary');
@@ -427,5 +437,23 @@ describe('fileUtils', () => {
);
expect(result.isTruncated).toBe(true);
});
it('should return an error if the file size exceeds 20MB', async () => {
// Create a file just over 20MB
const twentyOneMB = 21 * 1024 * 1024;
const buffer = Buffer.alloc(twentyOneMB, 0x61); // Fill with 'a'
actualNodeFs.writeFileSync(testTextFilePath, buffer);
const result = await processSingleFileContent(
testTextFilePath,
tempRootDir,
);
expect(result.error).toContain('File size exceeds the 20MB limit');
expect(result.returnDisplay).toContain(
'File size exceeds the 20MB limit',
);
expect(result.llmContent).toContain('File size exceeds the 20MB limit');
});
});
});

View File

@@ -94,19 +94,27 @@ export function isBinaryFile(filePath: string): boolean {
/**
* Detects the type of file based on extension and content.
* @param filePath Path to the file.
* @returns 'text', 'image', 'pdf', or 'binary'.
* @returns 'text', 'image', 'pdf', 'audio', 'video', or 'binary'.
*/
export function detectFileType(
filePath: string,
): 'text' | 'image' | 'pdf' | 'binary' {
): 'text' | 'image' | 'pdf' | 'audio' | 'video' | 'binary' {
const ext = path.extname(filePath).toLowerCase();
const lookedUpMimeType = mime.lookup(filePath); // Returns false if not found, or the mime type string
if (lookedUpMimeType && lookedUpMimeType.startsWith('image/')) {
return 'image';
}
if (lookedUpMimeType && lookedUpMimeType === 'application/pdf') {
return 'pdf';
if (lookedUpMimeType) {
if (lookedUpMimeType.startsWith('image/')) {
return 'image';
}
if (lookedUpMimeType.startsWith('audio/')) {
return 'audio';
}
if (lookedUpMimeType.startsWith('video/')) {
return 'video';
}
if (lookedUpMimeType === 'application/pdf') {
return 'pdf';
}
}
// Stricter binary check for common non-text extensions before content check
@@ -187,7 +195,7 @@ export async function processSingleFileContent(
error: `File not found: ${filePath}`,
};
}
const stats = fs.statSync(filePath); // Sync check
const stats = await fs.promises.stat(filePath);
if (stats.isDirectory()) {
return {
llmContent: '',
@@ -196,6 +204,19 @@ export async function processSingleFileContent(
};
}
const fileSizeInBytes = stats.size;
// 20MB limit
const maxFileSize = 20 * 1024 * 1024;
if (fileSizeInBytes > maxFileSize) {
throw new Error(
`File size exceeds the 20MB limit: ${filePath} (${(
fileSizeInBytes /
(1024 * 1024)
).toFixed(2)}MB)`,
);
}
const fileType = detectFileType(filePath);
const relativePathForDisplay = path
.relative(rootDirectory, filePath)
@@ -253,7 +274,9 @@ export async function processSingleFileContent(
};
}
case 'image':
case 'pdf': {
case 'pdf':
case 'audio':
case 'video': {
const contentBuffer = await fs.promises.readFile(filePath);
const base64Data = contentBuffer.toString('base64');
return {

View File

@@ -17,10 +17,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { execSync } from 'child_process';
import { rmSync } from 'fs';
import { rmSync, readFileSync } from 'fs';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
import { globSync } from 'glob';
const __dirname = dirname(fileURLToPath(import.meta.url));
const root = join(__dirname, '..');
@@ -32,4 +32,16 @@ rmSync(join(root, 'packages/cli/src/generated/'), {
recursive: true,
force: true,
});
execSync('npm run clean --workspaces', { stdio: 'inherit', cwd: root });
const RMRF_OPTIONS = { recursive: true, force: true };
rmSync(join(root, 'bundle'), RMRF_OPTIONS);
// Dynamically clean dist directories in all workspaces
const rootPackageJson = JSON.parse(
readFileSync(join(root, 'package.json'), 'utf-8'),
);
for (const workspace of rootPackageJson.workspaces) {
const packages = globSync(join(workspace, 'package.json'), { cwd: root });
for (const pkgPath of packages) {
const pkgDir = dirname(join(root, pkgPath));
rmSync(join(pkgDir, 'dist'), RMRF_OPTIONS);
}
}