import transformers
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import json
from tqdm.auto import tqdm
import random
from scipy.signal import savgol_filter
import wandb
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
from itertools import product
import pandas as pd
import multiprocessing as mp
from functools import partial
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class Config:
    def __init__(self):
        self.model_name = "Qwen/Qwen2-0.5B"
        self.data_dir = "dataset_chunks"
        self.max_length = 1024
        self.batch_size = 32
        self.num_seeds = 10000
        self.num_lr_steps = 10000
        self.min_lr = 1e-8
        self.max_lr = 10
        self.hidden_dim_ratio = 0.5
        self.dropout = 0.1
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.num_workers = mp.cpu_count()
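
# The LR sweep (see lr_finder / run_experiment below) raises the learning rate
# exponentially from min_lr to max_lr over num_lr_steps batches:
#     lr_i = min_lr * (max_lr / min_lr) ** (i / (num_lr_steps - 1))
# With min_lr=1e-8, max_lr=10 and 10,000 steps this spans nine orders of
# magnitude, multiplying the rate by roughly 1.002 per step (about a doubling
# every ~334 steps).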


class ImprovedAutoencoder(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, dropout):
        super().__init__()
        self.encoder = nn.ModuleList([
            nn.Linear(input_dim if i == 0 else hidden_dim, hidden_dim, dtype=torch.bfloat16)
            for i in range(num_layers)
        ])
        self.decoder = nn.ModuleList([
            nn.Linear(hidden_dim, hidden_dim if i < num_layers - 1 else input_dim, dtype=torch.bfloat16)
            for i in range(num_layers)
        ])
        self.layer_norms = nn.ModuleList([
            nn.LayerNorm(hidden_dim, dtype=torch.bfloat16)
            for _ in range(num_layers * 2 - 1)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        for enc, norm in zip(self.encoder, self.layer_norms[:len(self.encoder)]):
            x = F.relu(norm(enc(x)))
            x = self.dropout(x)
        for dec, norm in zip(self.decoder[:-1], self.layer_norms[len(self.encoder):]):
            x = F.relu(norm(dec(x)))
            x = self.dropout(x)
        x = self.decoder[-1](x)
        return x
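
# Shape sketch (illustrative only, not called anywhere): the autoencoder maps
# (batch, seq_len, input_dim) -> (batch, seq_len, input_dim), passing through
# hidden_dim at every intermediate layer. Assuming Qwen2-0.5B's hidden_size of
# 896 and the default hidden_dim_ratio of 0.5:
#     ae = ImprovedAutoencoder(input_dim=896, hidden_dim=448, num_layers=4, dropout=0.1)
#     x = torch.randn(2, 16, 896, dtype=torch.bfloat16)
#     assert ae(x).shape == x.shape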


class TokenizedDataset(torch.utils.data.Dataset):
    def __init__(self, file_paths):
        self.data = []
        for file_path in tqdm(file_paths, desc="Loading data chunks"):
            chunk_data = torch.load(file_path)
            logger.info(f"Loaded data from {file_path}")
            logger.info(f"Type of loaded data: {type(chunk_data)}")

            if isinstance(chunk_data, dict):
                logger.info(f"Keys in the dictionary: {chunk_data.keys()}")
                logger.info(f"Shape of input_ids: {chunk_data['input_ids'].shape}")
                self.data.append(chunk_data)
            elif isinstance(chunk_data, transformers.tokenization_utils_base.BatchEncoding):
                logger.info(f"Keys in the BatchEncoding: {chunk_data.keys()}")
                logger.info(f"Shape of input_ids: {chunk_data['input_ids'].shape}")
                self.data.append(chunk_data)
            else:
                logger.warning(f"Unexpected data type: {type(chunk_data)}")

        logger.info(f"Loaded {len(self.data)} chunks of data")

    def __len__(self):
        return sum(len(chunk['input_ids']) for chunk in self.data)

    def __getitem__(self, idx):
        for chunk in self.data:
            if idx < len(chunk['input_ids']):
                return {k: v[idx] for k, v in chunk.items()}
            idx -= len(chunk['input_ids'])
        raise IndexError("Index out of range")
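
# __getitem__ maps a global index onto per-chunk indices by walking the chunks
# in order. For example, with two chunks of 1,000 sequences each, idx=1500
# skips chunk 0 (idx becomes 500) and returns element 500 of chunk 1.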


def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def load_data(config):
    logger.info(f"Looking for data in directory: {config.data_dir}")
    chunk_files = [f for f in os.listdir(config.data_dir) if f.endswith('_tokenized.pt')]
    logger.info(f"Found {len(chunk_files)} chunk files: {chunk_files}")

    if not chunk_files:
        raise ValueError(f"No tokenized data files found in {config.data_dir}")

    chunk_files.sort(key=lambda x: int(x.split('_')[1]))
    chunk_files = [os.path.join(config.data_dir, f) for f in chunk_files]

    # Note: only the first chunk is used here (chunk_files[:1]).
    dataset = TokenizedDataset(chunk_files[:1])
    logger.info(f"Created dataset with {len(dataset)} samples")

    return DataLoader(dataset, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)
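
# The sort key above assumes chunk files are named like "chunk_<index>_tokenized.pt",
# so int(name.split('_')[1]) recovers the numeric chunk index; adjust the key if the
# files follow a different naming scheme.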


def extract_hidden_states(batch, model):
    with torch.no_grad():
        outputs = model(**batch, output_hidden_states=True)
    return outputs.hidden_states[0], outputs.hidden_states[-1]
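
# With output_hidden_states=True, Hugging Face models return hidden_states as a
# tuple whose first entry is the embedding-layer output and whose last entry is
# the output of the final transformer block, so the autoencoder is trained to
# approximate the whole stack's embedding-to-final-layer mapping.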


class KLDivergenceLoss(nn.Module):
    def forward(self, pred, target):
        pred = F.log_softmax(pred, dim=-1)
        target = F.softmax(target, dim=-1)
        return F.kl_div(pred, target, reduction='batchmean', log_target=False)
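
# This treats each hidden-state vector as a (softmaxed) distribution over its
# feature dimension and penalises the KL divergence between reconstruction and
# target. A plain nn.MSELoss() on the raw hidden states would be a reasonable
# alternative if a distributional view of the features is not wanted.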


def lr_finder(model, autoencoder, loss_fn, optimizer, train_loader, config):
    model.eval()
    autoencoder.train()
    log_lrs, losses = [], []
    best_loss, best_lr = float('inf'), None

    pbar = tqdm(total=config.num_lr_steps, desc="LR Finder")
    for batch_idx, batch in enumerate(train_loader):
        if batch_idx >= config.num_lr_steps:
            break

        lr = config.min_lr * (config.max_lr / config.min_lr) ** (batch_idx / (config.num_lr_steps - 1))
        optimizer.param_groups[0]['lr'] = lr

        batch = {k: v.to(config.device) for k, v in batch.items()}
        first_states, last_states = extract_hidden_states(batch, model)

        optimizer.zero_grad()
        reconstructed = autoencoder(first_states)
        loss = loss_fn(reconstructed, last_states)
        loss.backward()
        optimizer.step()

        if loss.item() < best_loss:
            best_loss = loss.item()
            best_lr = lr

        log_lrs.append(lr)
        losses.append(loss.item())

        pbar.update(1)
        pbar.set_postfix({"Loss": f"{loss.item():.4f}", "LR": f"{lr:.2e}"})

    pbar.close()
    return log_lrs, losses, best_lr, best_loss
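

# A small, optional sketch that uses the savgol_filter import above: smoothing
# the loss curve before reading off the best LR. Nothing in this file calls it,
# and the window/polyorder defaults are illustrative, not tuned.
def smooth_losses(losses, window=101, polyorder=3):
    """Smooth an LR-finder loss curve with a Savitzky-Golay filter so that a
    single noisy batch does not dominate the minimum."""
    losses = np.asarray(losses, dtype=np.float64)
    # window_length must be odd and no longer than the series.
    if len(losses) < window:
        window = len(losses) if len(losses) % 2 == 1 else len(losses) - 1
    if window <= polyorder:
        return losses
    return savgol_filter(losses, window_length=window, polyorder=polyorder)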


def run_experiment(config, model, train_loader, num_layers, seed):
    set_seed(seed)
    input_dim = model.config.hidden_size
    hidden_dim = int(input_dim * config.hidden_dim_ratio)
    autoencoder = ImprovedAutoencoder(input_dim, hidden_dim, num_layers, config.dropout).to(config.device)

    loss_fn = KLDivergenceLoss()
    optimizer = optim.AdamW(autoencoder.parameters(), lr=config.min_lr)

    log_lrs, losses = [], []
    best_loss, best_lr = float('inf'), None

    pbar = tqdm(total=config.num_lr_steps, desc=f"LR Finder (Layers: {num_layers}, Seed: {seed})")
    for batch_idx, batch in enumerate(train_loader):
        if batch_idx >= config.num_lr_steps:
            break

        lr = config.min_lr * (config.max_lr / config.min_lr) ** (batch_idx / (config.num_lr_steps - 1))
        optimizer.param_groups[0]['lr'] = lr

        batch = {k: v.to(config.device) for k, v in batch.items()}
        first_states, last_states = extract_hidden_states(batch, model)

        optimizer.zero_grad()
        reconstructed = autoencoder(first_states)
        loss = loss_fn(reconstructed, last_states)
        loss.backward()
        optimizer.step()

        if loss.item() < best_loss:
            best_loss = loss.item()
            best_lr = lr

        log_lrs.append(lr)
        losses.append(loss.item())

        wandb.log({
            "loss": loss.item(),
            "lr": lr,
            "batch_idx": batch_idx,
            "num_layers": num_layers,
            "seed": seed,
            "best_loss": best_loss,
            "best_lr": best_lr
        })

        pbar.update(1)
        pbar.set_postfix({"Loss": f"{loss.item():.4f}", "LR": f"{lr:.2e}"})

    pbar.close()

    result = {
        'seed': seed,
        'num_layers': num_layers,
        'hidden_dim_ratio': config.hidden_dim_ratio,
        'dropout': config.dropout,
        'final_loss': losses[-1],
        'final_lr': log_lrs[-1],
        'best_lr': best_lr,
        'best_loss': best_loss
    }

    logger.info(f"Experiment completed: {result}")
    return result


def main_per_run_logging():
    """Alternative entry point: logs each (num_layers, seed) experiment as its own
    W&B run (grouped by layer count) and appends results to the JSONL file
    incrementally. The default entry point is main() at the bottom of the file."""
    config = Config()
    wandb.init(project="qwen-autoencoder-lr-finder", config=config.__dict__)

    logger.info("Loading Qwen model and tokenizer...")
    model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16).to(config.device)
    tokenizer = AutoTokenizer.from_pretrained(config.model_name)

    logger.info("Loading data...")
    train_loader = load_data(config)

    logger.info("Starting experiments...")
    results = []
    for num_layers in range(4, 9):
        for seed in range(1, config.num_seeds + 1):
            with wandb.init(project="qwen-autoencoder-lr-finder",
                            config=config.__dict__,
                            group=f"layers_{num_layers}",
                            name=f"seed_{seed}",
                            job_type="experiment",
                            reinit=True):
                result = run_experiment(config, model, train_loader, num_layers, seed)
                results.append(result)

                with open('lr_finder_results.jsonl', 'a') as f:
                    json.dump(result, f)
                    f.write('\n')

                wandb.log(result)

    logger.info("Creating visualizations...")
    plot_results(results)
    create_heatmap(results)
    create_parallel_coordinates_plot(results)
    create_3d_scatter(results)

    logger.info("Experiment completed. Check WandB for detailed results and visualizations.")


def run_experiments_sequential(config, model, train_loader):
    results = []
    for num_layers in tqdm(range(4, 9), desc="Number of Layers"):
        for seed in tqdm(range(1, config.num_seeds + 1), desc="Seeds", leave=False):
            result = run_experiment(config, model, train_loader, num_layers, seed)
            results.append(result)
    return results
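
# multiprocessing and functools.partial appear to be imported with a parallel
# variant of this sweep in mind, but no such variant is implemented here;
# sharing a single CUDA-resident model across worker processes is not
# straightforward, so the sweep runs sequentially.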


def plot_results(results):
    fig, axs = plt.subplots(3, 2, figsize=(20, 30))
    fig.suptitle('Learning Rate Finder Results')

    for i, num_layers in enumerate(range(4, 9)):
        layer_results = [r for r in results if r['num_layers'] == num_layers]
        best_lrs = [r['best_lr'] for r in layer_results]
        best_losses = [r['best_loss'] for r in layer_results]

        axs[i // 2, i % 2].scatter(best_lrs, best_losses, alpha=0.5)
        axs[i // 2, i % 2].set_xlabel('Best Learning Rate')
        axs[i // 2, i % 2].set_ylabel('Best Loss')
        axs[i // 2, i % 2].set_title(f'{num_layers} Layers')
        axs[i // 2, i % 2].set_xscale('log')
        axs[i // 2, i % 2].set_yscale('log')

    # Only five layer counts (4-8) are plotted, so hide the unused sixth panel.
    axs[2, 1].set_visible(False)

    plt.tight_layout()
    wandb.log({"lr_loss_relationships": wandb.Image(plt)})
    plt.close()


def create_heatmap(results):
    layer_counts = len(set(r['num_layers'] for r in results))
    seed_counts = len(set(r['seed'] for r in results))

    heatmap_data = np.zeros((layer_counts, seed_counts))
    for r in results:
        layer_idx = r['num_layers'] - 4
        seed_idx = r['seed'] - 1
        heatmap_data[layer_idx, seed_idx] = r['best_loss']

    plt.figure(figsize=(20, 10))
    plt.imshow(heatmap_data, aspect='auto', cmap='viridis')
    plt.colorbar(label='Best Loss')
    plt.xlabel('Seed')
    plt.ylabel('Number of Layers')
    plt.title('Heatmap of Best Loss across Layers and Seeds')
    plt.tight_layout()
    wandb.log({"loss_heatmap": wandb.Image(plt)})
    plt.close()


def create_parallel_coordinates_plot(results):
    df = pd.DataFrame(results)

    plt.figure(figsize=(20, 10))
    pd.plotting.parallel_coordinates(df, 'num_layers', colormap='viridis')
    plt.title('Parallel Coordinates Plot of Hyperparameters')
    plt.tight_layout()
    wandb.log({"parallel_coordinates": wandb.Image(plt)})
    plt.close()


def create_3d_scatter(results):
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(111, projection='3d')

    for num_layers in range(4, 9):
        layer_results = [r for r in results if r['num_layers'] == num_layers]
        # 3D axes do not support log scales reliably, so plot log10 values directly.
        x = [np.log10(r['best_lr']) for r in layer_results]
        y = [np.log10(r['best_loss']) for r in layer_results]
        z = [r['seed'] for r in layer_results]
        ax.scatter(x, y, z, label=f'{num_layers} Layers')

    ax.set_xlabel('log10(Best Learning Rate)')
    ax.set_ylabel('log10(Best Loss)')
    ax.set_zlabel('Seed')
    ax.legend()
    plt.title('3D Scatter Plot of Best LR, Loss, and Seed')
    plt.tight_layout()
    wandb.log({"3d_scatter": wandb.Image(plt)})
    plt.close()


def main():
    mp.set_start_method('spawn')
    config = Config()
    wandb.init(project="qwen-autoencoder-lr-finder", config=config.__dict__)

    logger.info("Loading Qwen model and tokenizer...")
    model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16).to(config.device)
    tokenizer = AutoTokenizer.from_pretrained(config.model_name)

    logger.info("Loading data...")
    train_loader = load_data(config)

    logger.info("Starting experiments...")
    results = run_experiments_sequential(config, model, train_loader)

    logger.info("Saving results...")
    with open('lr_finder_results.jsonl', 'w') as f:
        for result in results:
            json.dump(result, f)
            f.write('\n')

    logger.info("Creating visualizations...")
    plot_results(results)
    create_heatmap(results)
    create_parallel_coordinates_plot(results)
    create_3d_scatter(results)

    logger.info("Experiment completed. Check WandB for detailed results and visualizations.")
    wandb.finish()


if __name__ == "__main__":
    main()