feat(workspaces): unify architecture and improve auth UX

This commit is contained in:
mkorwel
2026-03-18 21:43:07 -07:00
parent a1763a3e44
commit c4c5d4e956
4 changed files with 190 additions and 71 deletions

View File

@@ -39,48 +39,61 @@ export async function runOrchestrator(args: string[], env: NodeJS.ProcessEnv = p
await provider.ensureReady();
// Paths
const hostHome = `~`;
const hostWorkDir = `${hostHome}/dev/main`;
const hostWorkspaceRoot = `/mnt/disks/data`;
const hostWorkDir = `${hostWorkspaceRoot}/main`;
const containerHome = '/home/node';
const remotePolicyPath = `${containerHome}/.gemini/policies/workspace-policy.toml`;
const persistentScripts = `${containerHome}/.workspaces/scripts`;
const containerWorkspaceRoot = `/home/node/.workspaces`;
const remotePolicyPath = `${containerWorkspaceRoot}/policies/workspace-policy.toml`;
const persistentScripts = `${containerWorkspaceRoot}/scripts`;
const sessionName = `workspace-${prNumber}-${action}`;
const remoteWorktreeDir = `${containerHome}/dev/worktrees/${sessionName}`;
const hostWorktreeDir = `${hostHome}/dev/worktrees/${sessionName}`;
const remoteWorktreeDir = `${containerWorkspaceRoot}/worktrees/${sessionName}`;
const hostWorktreeDir = `${hostWorkspaceRoot}/worktrees/${sessionName}`;
// 3. Remote Context Setup (Executed on HOST for permission simplicity)
console.log(`🚀 Preparing remote environment for ${action} on #${prNumber}...`);
// FIX: Use the host path to check for existence
const check = await provider.getExecOutput(`ls -d ${hostWorktreeDir}/.git`);
// FIX: Ensure container user (node) owns the workspaces, config, and dev directories
// FIX: Ensure container user (node) owns the workspaces directories
// This resolves EACCES errors across all shared volumes.
await provider.exec(`sudo docker exec -u root maintainer-worker chown -R node:node ${containerHome}/.workspaces ${containerHome}/.gemini ${containerHome}/dev`);
console.log(' - Synchronizing container permissions...');
await provider.exec(`sudo chown -R 1000:1000 /mnt/disks/data`);
if (check.status !== 0) {
console.log(' - Provisioning isolated git worktree...');
// We run these on the host because the host user owns the ~/dev/main directory
// We run these on the host because the host user owns the data directory
const setupCmd = `
mkdir -p ${hostHome}/dev/worktrees && \
cd ${hostWorkDir} && \
git fetch upstream pull/${prNumber}/head && \
git worktree add -f ${hostWorktreeDir} FETCH_HEAD
sudo -u chronos git -C ${hostWorkDir} config --add safe.directory ${hostWorkDir} && \
sudo mkdir -p ${hostWorkspaceRoot}/worktrees && \
sudo chown chronos:chronos ${hostWorkspaceRoot}/worktrees && \
sudo -u chronos git -C ${hostWorkDir} fetch --quiet upstream pull/${prNumber}/head && \
sudo -u chronos git -C ${hostWorkDir} worktree add --quiet -f ${hostWorktreeDir} FETCH_HEAD 2>&1
`;
const setupRes = await provider.exec(setupCmd);
if (setupRes !== 0) {
const setupRes = await provider.getExecOutput(setupCmd);
if (setupRes.status !== 0) {
console.error(' ❌ Failed to provision remote worktree.');
console.error(' STDOUT:', setupRes.stdout);
console.error(' STDERR:', setupRes.stderr);
return 1;
}
console.log(' ✅ Worktree provisioned successfully.');
} else {
console.log(' ✅ Remote worktree ready.');
}
// 4. Execution Logic
const remoteWorker = `tsx ${persistentScripts}/entrypoint.ts ${prNumber} . ${remotePolicyPath} ${action}`;
const remoteConfigPath = `${hostWorkspaceRoot}/gemini-cli-config/.gemini/settings.json`;
// FIX: Dynamically retrieve the API key from the host-side config to inject it
const apiKeyRes = await provider.getExecOutput(`cat ${remoteConfigPath} | grep apiKey | cut -d '\"' -f 4`);
const remoteApiKey = apiKeyRes.stdout.trim();
// DEBUG: Run directly in foreground WITHOUT tmux to see immediate errors
const containerWrap = `sudo docker exec -it maintainer-worker sh -c 'cd ${remoteWorktreeDir} && ${remoteWorker}; exec $SHELL'`;
const containerWrap = `sudo docker exec -it ${remoteApiKey ? `-e GEMINI_API_KEY=${remoteApiKey}` : ''} maintainer-worker sh -c ${q(`cd ${remoteWorktreeDir} && ${remoteWorker}; exec $SHELL`)}`;
const finalSSH = provider.getRunCommand(containerWrap, { interactive: true });

View File

@@ -34,7 +34,10 @@ export class GceConnectionManager {
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'LogLevel=ERROR',
'-o', 'ConnectTimeout=15',
'-o', 'ConnectTimeout=60',
'-o', 'ServerAliveInterval=30',
'-o', 'ServerAliveCountMax=3',
'-o', 'SendEnv=USER',
'-i', `${os.homedir()}/.ssh/google_compute_engine`
];
}

View File

@@ -59,29 +59,54 @@ export class GceCosProvider implements WorkspaceProvider {
'--project', this.projectId, '--network', vpcName, '--allow=tcp:22', '--source-ranges=0.0.0.0/0'], { stdio: 'inherit' });
}
console.log(`🚀 Provisioning GCE COS worker: ${this.instanceName}...`);
console.log(`🚀 Provisioning GCE COS worker: ${this.instanceName} (Unified Workspace Setup)...`);
const startupScriptContent = `#!/bin/bash
set -e
echo "🚀 Starting Maintainer Worker Resilience Loop..."
echo "🚀 Initializing Unified Workspace..."
# Wait for Docker to be ready
# 1. Mount Data Disk
mkdir -p /mnt/disks/data
if ! mountpoint -q /mnt/disks/data; then
DATA_DISK="/dev/disk/by-id/google-data"
[ -e "$DATA_DISK" ] || DATA_DISK="/dev/sdb"
while [ ! -e "$DATA_DISK" ]; do echo "Waiting for data disk..."; sleep 1; done
blkid "$DATA_DISK" || mkfs.ext4 -m 0 -F "$DATA_DISK"
mount -o discard,defaults "$DATA_DISK" /mnt/disks/data
fi
# 2. Prepare Stateful Directories (on the persistent disk)
mkdir -p /mnt/disks/data/main /mnt/disks/data/worktrees /mnt/disks/data/scripts /mnt/disks/data/config /mnt/disks/data/policies
chmod -R 777 /mnt/disks/data
# 3. Handle Global Persistence Symlink
# We create a global /workspaces link that points to the data disk.
# This avoids dependencies on ephemeral home directories.
ln -sfn /mnt/disks/data /workspaces
chmod 777 /workspaces
# Ensure host users can find it via their home too (if directory exists)
for h in /home/*_google_com; do
[ -d "$h" ] || continue
ln -sfn /mnt/disks/data "$h/.workspaces"
chown -h $(basename $h):$(basename $h) "$h/.workspaces"
done
# 4. Container Resilience Loop
until docker info >/dev/null 2>&1; do echo "Waiting for docker..."; sleep 2; done
# Pull with retries
for i in {1..5}; do
docker pull ${imageUri} && break || (echo "Pull failed, retry $i..." && sleep 5)
done
# Run if not already exists
if ! docker ps -a | grep -q "maintainer-worker"; then
docker run -d --name maintainer-worker --restart always \\
-v ~/.workspaces:/home/node/.workspaces:rw \\
-v ~/dev:/home/node/dev:rw \\
-v ~/.gemini:/home/node/.gemini:rw \\
-v /mnt/disks/data:/home/node/.workspaces:rw \\
-v /mnt/disks/data/gemini-cli-config/.gemini:/home/node/.gemini:rw \\
${imageUri} /bin/bash -c "while true; do sleep 1000; done"
fi
echo "✅ Maintainer Worker is active."
echo "✅ Unified Workspace is active."
`;
const tmpScriptPath = path.join(os.tmpdir(), `gcli-startup-${Date.now()}.sh`);
@@ -92,7 +117,11 @@ export class GceCosProvider implements WorkspaceProvider {
'--project', this.projectId,
'--zone', this.zone,
'--machine-type', 'n2-standard-8',
'--create-disk', `auto-delete=yes,boot=yes,device-name=${this.instanceName},image=projects/cos-cloud/global/images/family/cos-stable,mode=rw,size=200,type=projects/${this.projectId}/zones/${this.zone}/diskTypes/pd-balanced`,
'--image-family', 'cos-stable',
'--image-project', 'cos-cloud',
'--boot-disk-size', '10GB',
'--boot-disk-type', 'pd-balanced',
'--create-disk', `name=${this.instanceName}-data,size=200,type=pd-balanced,device-name=data,auto-delete=yes`,
'--metadata-from-file', `startup-script=${tmpScriptPath}`,
'--metadata', 'enable-oslogin=TRUE',
'--network-interface', `network=${vpcName},subnet=${subnetName},no-address`,
@@ -132,13 +161,19 @@ export class GceCosProvider implements WorkspaceProvider {
let needsUpdate = false;
if (containerCheck.status === 0 && containerCheck.stdout.trim()) {
// Check if the running image is stale
const imageUri = 'us-docker.pkg.dev/gemini-code-dev/gemini-cli/maintainer:latest';
// For simplicity in this environment, we'll just check if tmux is missing as a proxy for "stale image"
const tmuxCheck = await this.getExecOutput('sudo docker exec maintainer-worker which tmux');
if (tmuxCheck.status !== 0) {
console.log(' ⚠️ Remote container is stale (missing tmux). Triggering update...');
// Check if the volume mounts are correct by checking for files inside .workspaces/main
const mountCheck = await this.getExecOutput('sudo docker exec maintainer-worker ls -A /home/node/.workspaces/main');
if (mountCheck.status !== 0 || !mountCheck.stdout.trim()) {
console.log(' ⚠️ Remote container has incorrect or empty mounts. Triggering refresh...');
needsUpdate = true;
} else {
// Check if the running image is stale
const imageUri = 'us-docker.pkg.dev/gemini-code-dev/gemini-cli/maintainer:latest';
const tmuxCheck = await this.getExecOutput('sudo docker exec maintainer-worker which tmux');
if (tmuxCheck.status !== 0) {
console.log(' ⚠️ Remote container is stale (missing tmux). Triggering update...');
needsUpdate = true;
}
}
} else {
needsUpdate = true;
@@ -147,7 +182,16 @@ export class GceCosProvider implements WorkspaceProvider {
if (needsUpdate) {
console.log(' ⚠️ Container missing or stale. Attempting refresh...');
const imageUri = 'us-docker.pkg.dev/gemini-code-dev/gemini-cli/maintainer:latest';
const recoverCmd = `sudo docker pull ${imageUri} && (sudo docker rm -f maintainer-worker || true) && sudo docker run -d --name maintainer-worker --restart always -v ~/.workspaces:/home/node/.workspaces:rw -v ~/dev:/home/node/dev:rw -v ~/.gemini:/home/node/.gemini:rw ${imageUri} /bin/bash -c "while true; do sleep 1000; done"`;
// Ensure data mount is available before running
const recoverCmd = `
(mountpoint -q /mnt/disks/data || sudo mount /dev/disk/by-id/google-data /mnt/disks/data) && \
sudo docker pull ${imageUri} && \
(sudo docker rm -f maintainer-worker || true) && \
sudo docker run -d --name maintainer-worker --restart always \
-v /mnt/disks/data:/home/node/.workspaces:rw \
-v /mnt/disks/data/gemini-cli-config/.gemini:/home/node/.gemini:rw \
${imageUri} /bin/bash -c "while true; do sleep 1000; done"
`;
const recoverRes = await this.exec(recoverCmd);
if (recoverRes !== 0) {
console.error(' ❌ Critical: Failed to refresh maintainer container.');
@@ -172,7 +216,8 @@ Host ${this.sshAlias}
UserKnownHostsFile /dev/null
CheckHostIP no
StrictHostKeyChecking no
ConnectTimeout 15
ConnectTimeout 60
ServerAliveInterval 30
`;
fs.writeFileSync(this.sshConfigPath, sshEntry);
@@ -184,7 +229,8 @@ Host ${this.sshAlias}
console.error('\n❌ All connection attempts failed. Please ensure you have "gcert" and IAP permissions.');
return 1;
}
console.log(' ✅ Connection verified.');
console.log(' ✅ Connection verified. Waiting 10s for remote disk initialization...');
await new Promise(r => setTimeout(r, 10000));
return 0;
}
@@ -204,7 +250,7 @@ Host ${this.sshAlias}
async getExecOutput(command: string, options: ExecOptions = {}): Promise<{ status: number; stdout: string; stderr: string }> {
let finalCmd = command;
if (options.wrapContainer) {
finalCmd = `docker exec ${options.interactive ? '-it' : ''} ${options.cwd ? `-w ${options.cwd}` : ''} ${options.wrapContainer} sh -c ${this.quote(command)}`;
finalCmd = `sudo docker exec ${options.interactive ? '-it' : ''} ${options.cwd ? `-w ${options.cwd}` : ''} ${options.wrapContainer} sh -c ${this.quote(command)}`;
}
return this.conn.run(finalCmd, { interactive: options.interactive, stdio: options.interactive ? 'inherit' : 'pipe' });

View File

@@ -9,6 +9,33 @@ import { ProviderFactory } from './providers/ProviderFactory.ts';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const REPO_ROOT = path.resolve(__dirname, '../../../..');
/**
* Loads and parses a local .env file from the repository root and the home directory.
*/
/**
 * Loads KEY=value pairs from local `.env` files into `process.env`.
 *
 * Searches two locations, in order: the repository root (`REPO_ROOT/.env`)
 * and the user's home directory (`~/.env`). Blank lines and lines starting
 * with `#` are ignored. A value wrapped in a matching pair of single or
 * double quotes is unwrapped. Keys already present in `process.env` are
 * never overwritten, so real environment variables take precedence over
 * file contents.
 */
function loadDotEnv(): void {
  const envPaths = [
    path.join(REPO_ROOT, '.env'),
    path.join(os.homedir(), '.env'),
  ];
  for (const envPath of envPaths) {
    if (!fs.existsSync(envPath)) continue;
    const content = fs.readFileSync(envPath, 'utf8');
    for (const line of content.split('\n')) {
      const trimmed = line.trim();
      if (!trimmed || trimmed.startsWith('#')) continue;
      // Split on the FIRST '=' only; the value may itself contain '='.
      const match = trimmed.match(/^([^=]+)=(.*)$/);
      if (!match) continue;
      const key = match[1].trim();
      // FIX: use a backreference so only a *matching* quote pair is
      // stripped. The previous pattern /^["'](.*)["']$/ also stripped
      // mismatched pairs such as "value' or 'value".
      const val = match[2].trim().replace(/^(["'])(.*)\1$/, '$2');
      if (!process.env[key]) process.env[key] = val;
    }
  }
}
async function prompt(question: string, defaultValue: string, explanation?: string, sensitive: boolean = false): Promise<string> {
const autoAccept = process.argv.includes('--yes') || process.argv.includes('-y');
if (autoAccept && defaultValue) return defaultValue;
@@ -19,8 +46,13 @@ async function prompt(question: string, defaultValue: string, explanation?: stri
const displayDefault = sensitive && defaultValue ? `${defaultValue.substring(0, 4)}...${defaultValue.substring(defaultValue.length - 4)}` : defaultValue;
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
const promptMsg = defaultValue
? `${question} [Detected: ${displayDefault}] (Press <Enter> to keep, or type new value): `
: `${question} (<Enter> for none): `;
return new Promise((resolve) => {
rl.question(`${question} (default: ${displayDefault || 'none'}, <Enter> to use default): `, (answer) => {
rl.question(promptMsg, (answer) => {
rl.close();
resolve(answer.trim() || defaultValue);
});
@@ -52,6 +84,8 @@ async function createFork(upstream: string): Promise<string> {
}
export async function runSetup(env: NodeJS.ProcessEnv = process.env) {
loadDotEnv();
console.log(`
================================================================================
🚀 GEMINI WORKSPACES: HIGH-PERFORMANCE REMOTE DEVELOPMENT
@@ -154,30 +188,32 @@ and full builds) to a dedicated, high-performance GCP worker.
// 3. Security & Auth (Always check for token if init is needed)
let githubToken = env.WORKSPACE_GH_TOKEN || '';
if (!githubToken && !skipConfig) {
const hasToken = await confirm('\nDo you already have a GitHub Personal Access Token (PAT) with "Read/Write" access to contents & PRs?');
if (hasToken) {
githubToken = await prompt('Paste Scoped Token', '');
} else {
const shouldGenToken = await confirm('Would you like to generate a new scoped token now? (Highly Recommended)');
if (shouldGenToken) {
const baseUrl = 'https://github.com/settings/personal-access-tokens/new';
const name = `Workspace-${env.USER}`;
const repoParams = userFork !== upstreamRepo
? `&repositories[]=${encodeURIComponent(upstreamRepo)}&repositories[]=${encodeURIComponent(userFork)}`
: `&repositories[]=${encodeURIComponent(upstreamRepo)}`;
const magicLink = `${baseUrl}?name=${encodeURIComponent(name)}&description=Gemini+Workspaces+Worker${repoParams}&contents=write&pull_requests=write&metadata=read`;
const terminalLink = `\u001b]8;;${magicLink}\u0007${magicLink}\u001b]8;;\u0007`;
console.log(`\n🔐 ACTION REQUIRED: Create a token with the required permissions:`);
console.log(`\n${terminalLink}\n`);
if (!skipConfig) {
if (!githubToken) {
const hasToken = await confirm('\nDo you already have a GitHub Personal Access Token (PAT) with "Read/Write" access to contents & PRs?');
if (hasToken) {
githubToken = await prompt('Paste Scoped Token', '');
} else {
const shouldGenToken = await confirm('Would you like to generate a new scoped token now? (Highly Recommended)');
if (shouldGenToken) {
const baseUrl = 'https://github.com/settings/personal-access-tokens/new';
const name = `Workspace-${env.USER}`;
const repoParams = userFork !== upstreamRepo
? `&repositories[]=${encodeURIComponent(upstreamRepo)}&repositories[]=${encodeURIComponent(userFork)}`
: `&repositories[]=${encodeURIComponent(upstreamRepo)}`;
const magicLink = `${baseUrl}?name=${encodeURIComponent(name)}&description=Gemini+Workspaces+Worker${repoParams}&contents=write&pull_requests=write&metadata=read`;
const terminalLink = `\u001b]8;;${magicLink}\u0007${magicLink}\u001b]8;;\u0007`;
console.log(`\n🔐 ACTION REQUIRED: Create a token with the required permissions:`);
console.log(`\n${terminalLink}\n`);
githubToken = await prompt('Paste Scoped Token', '');
}
}
} else {
githubToken = await prompt('GitHub Token', githubToken, 'A GitHub PAT is required for remote repository access and PR operations.', true);
}
} else if (githubToken) {
console.log(' ✅ Using GitHub token from environment (WORKSPACE_GH_TOKEN).');
}
// 4. Gemini API Auth Strategy
@@ -190,13 +226,23 @@ and full builds) to a dedicated, high-performance GCP worker.
try {
const localSettings = JSON.parse(fs.readFileSync(localSettingsPath, 'utf8'));
authStrategy = localSettings.security?.auth?.selectedType || 'google_accounts';
if (!geminiApiKey && localSettings.security?.auth?.apiKey) {
geminiApiKey = localSettings.security.auth.apiKey;
}
console.log(` - Local Auth Method: ${authStrategy}`);
} catch (e) {}
}
if (authStrategy === 'gemini-api-key' && !geminiApiKey) {
console.log('\n📖 In API Key mode, the remote worker needs your Gemini API Key to authenticate.');
geminiApiKey = await prompt('Paste Gemini API Key', '', undefined, true);
if (authStrategy === 'gemini-api-key') {
if (geminiApiKey) {
console.log('\n🔐 Found Gemini API Key in environment or settings.');
geminiApiKey = await prompt('Gemini API Key', geminiApiKey, 'Enter to use? Or paste a new one', true);
} else {
console.log('\n📖 In API Key mode, the remote worker needs your Gemini API Key to authenticate.');
geminiApiKey = await prompt('Gemini API Key', '', 'Paste your Gemini API Key', true);
}
} else {
console.log(` - Using current auth strategy: ${authStrategy}`);
}
// 5. Save Confirmed State
@@ -242,31 +288,42 @@ and full builds) to a dedicated, high-performance GCP worker.
const setupRes = await provider.setup({ projectId, zone, dnsSuffix: '.internal.gcpnode.com' });
if (setupRes !== 0) return setupRes;
const persistentScripts = `~/.workspaces/scripts`;
const remoteConfigDir = `~/.workspaces/gemini-cli-config/.gemini`;
// Use the direct mount path to avoid symlink race conditions
const workspaceRoot = `/mnt/disks/data`;
const persistentScripts = `${workspaceRoot}/scripts`;
const remoteConfigDir = `${workspaceRoot}/gemini-cli-config/.gemini`;
console.log(`\n📦 Synchronizing Logic & Credentials...`);
await provider.exec(`mkdir -p ~/dev/main ~/.gemini/policies ~/.workspaces/scripts ${remoteConfigDir}`);
// Ensure the directory structure exists on the host
await provider.exec(`sudo mkdir -p ${workspaceRoot}/main ${workspaceRoot}/worktrees ${workspaceRoot}/policies ${workspaceRoot}/scripts ${remoteConfigDir}`);
await provider.exec(`sudo chown -R $(whoami):$(whoami) ${workspaceRoot}`);
await provider.exec(`sudo chmod -R 777 ${workspaceRoot}`);
// 1. Sync Scripts & Policies
await provider.sync('.gemini/skills/workspaces/scripts/', `${persistentScripts}/`, { delete: true, sudo: true });
await provider.sync('.gemini/skills/workspaces/policy.toml', `~/.gemini/policies/workspace-policy.toml`, { sudo: true });
await provider.sync('.gemini/skills/workspaces/policy.toml', `${workspaceRoot}/policies/workspace-policy.toml`, { sudo: true });
// 2. Initialize Remote Gemini Config with Auth
console.log('⚙️ Initializing remote Gemini configuration...');
const remoteSettings: any = {
general: {
authMethod: authStrategy
security: {
auth: {
selectedType: authStrategy
}
}
};
if (authStrategy === 'gemini-api-key' && geminiApiKey) {
remoteSettings.general.apiKey = geminiApiKey;
remoteSettings.security.auth.apiKey = geminiApiKey;
console.log(' ✅ Configuring remote for API Key authentication.');
}
const tmpSettingsPath = path.join(os.tmpdir(), `remote-settings-${Date.now()}.json`);
fs.writeFileSync(tmpSettingsPath, JSON.stringify(remoteSettings, null, 2));
// Ensure the remote config dir exists before syncing
await provider.exec(`sudo mkdir -p ${remoteConfigDir} && sudo chmod 777 ${remoteConfigDir}`);
await provider.sync(tmpSettingsPath, `${remoteConfigDir}/settings.json`, { sudo: true });
fs.unlinkSync(tmpSettingsPath);
@@ -279,13 +336,13 @@ and full builds) to a dedicated, high-performance GCP worker.
}
if (githubToken) {
await provider.exec(`mkdir -p ~/.workspaces && echo ${githubToken} > ~/.workspaces/.gh_token && chmod 600 ~/.workspaces/.gh_token`);
await provider.exec(`echo ${githubToken} | sudo tee ${workspaceRoot}/.gh_token > /dev/null && sudo chmod 600 ${workspaceRoot}/.gh_token`);
}
// Final Repo Sync
console.log(`🚀 Finalizing Remote Repository (${userFork})...`);
const repoUrl = `https://github.com/${userFork}.git`;
const cloneCmd = `rm -rf ~/dev/main && git clone --filter=blob:none ${repoUrl} ~/dev/main && cd ~/dev/main && git remote add upstream https://github.com/${upstreamRepo}.git && git fetch upstream`;
const cloneCmd = `sudo rm -rf ${workspaceRoot}/main && sudo git clone --quiet --filter=blob:none ${repoUrl} ${workspaceRoot}/main && cd ${workspaceRoot}/main && sudo git remote add upstream https://github.com/${upstreamRepo}.git && sudo git fetch --quiet upstream && sudo chown -R $(whoami):$(whoami) ${workspaceRoot}`;
await provider.exec(cloneCmd);
console.log('\n✨ ALL SYSTEMS GO! Your Gemini Workspace is ready.');