embedding LR 0.6 to 0.8

This commit is contained in:
autoresearch
2026-03-08 04:19:15 +00:00
parent 59e9dd9aab
commit 7d047e42f4
+1 -1
@@ -435,7 +435,7 @@ WINDOW_PATTERN = "SSSSL" # sliding window pattern: L=full, S=half context
 # Optimization
 TOTAL_BATCH_SIZE = 2**18 # ~262K tokens per optimizer step
-EMBEDDING_LR = 0.6 # learning rate for token embeddings (Adam)
+EMBEDDING_LR = 0.8 # learning rate for token embeddings (Adam)
 UNEMBEDDING_LR = 0.004 # learning rate for lm_head (Adam)
 MATRIX_LR = 0.04 # learning rate for matrix parameters (Muon)
 SCALAR_LR = 0.5 # learning rate for per-layer scalars (Adam)
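
For context, a minimal sketch of how per-group learning rates like these could be wired into optimizers, assuming parameters can be routed by name and tensor rank. The function name build_optimizers and the routing heuristics are hypothetical, and plain Adam stands in for Muon on the matrix parameters, since Muon is not part of torch.optim.

import torch

# Constants from the config above (post-commit values).
EMBEDDING_LR = 0.8      # token embeddings (Adam); raised from 0.6 in this commit
UNEMBEDDING_LR = 0.004  # lm_head (Adam)
MATRIX_LR = 0.04        # matrix parameters (Muon in the source config)
SCALAR_LR = 0.5         # per-layer scalars (Adam)

def build_optimizers(model: torch.nn.Module):
    # Hypothetical routing: split parameters into the four groups the
    # config names, keyed on parameter name and tensor rank.
    embed, unembed, matrix, scalar = [], [], [], []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue
        if "lm_head" in name:
            unembed.append(p)
        elif "embed" in name:
            embed.append(p)
        elif p.ndim >= 2:
            matrix.append(p)
        else:
            scalar.append(p)
    adam = torch.optim.Adam([
        {"params": embed, "lr": EMBEDDING_LR},
        {"params": unembed, "lr": UNEMBEDDING_LR},
        {"params": scalar, "lr": SCALAR_LR},
    ])
    # Stand-in: the source config assigns these to Muon; Adam is
    # substituted here so the sketch runs with stock PyTorch.
    matrix_opt = torch.optim.Adam([{"params": matrix, "lr": MATRIX_LR}])
    return adam, matrix_opt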