CBS (odd, odd) masking

Happy PinGu · 2024. 4. 18. 11:14
import os
import math
import time

import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset

from params import *
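
Note: params.py is not included in the post; the script only reads the keys PS, DIM, MR, SNR, LR, EP and BS from it. A minimal stand-in might look like this (the values below are illustrative assumptions, not the author's settings):

params = {
    'PS':  [2],                 # patch sizes to sweep
    'DIM': [512],               # latent dimensions to sweep
    'MR':  [0.25, 0.5, 0.75],   # mask ratios to sweep
    'SNR': [0, 10, 20],         # channel SNRs in dB
    'LR':  1e-3,                # learning rate
    'EP':  200,                 # max epochs
    'BS':  64,                  # batch size
}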

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#print(device)
def count_parameters(model):
    # Total number of trainable parameters in the model
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def patch_std(image, patch_size=2):
    # Calculate the standard deviation within each patch
    H, W = image.shape
    std_map = np.zeros((H // patch_size, W // patch_size))
    for i in range(0, H, patch_size):
        for j in range(0, W, patch_size):
            patch = image[i:i + patch_size, j:j + patch_size]
            std_map[i // patch_size, j // patch_size] = np.std(patch)
    return std_map
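
A quick sanity check of patch_std (this snippet is mine, not in the original post): on a toy 4×4 image with 2×2 patches it returns a 2×2 map of per-patch standard deviations.

_demo = np.arange(16, dtype=np.float64).reshape(4, 4)
print(patch_std(_demo, patch_size=2))
# [[2.06155281 2.06155281]
#  [2.06155281 2.06155281]]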


def mask_patches_chessboard(images, patch_size=2, mask_ratio=0.5, complexity_based=False):
    if mask_ratio != 0.5:
        B, C, H, W = images.shape
        masked_images = images.clone()

        for b in range(B):
            image = images[b].permute(1, 2, 0).cpu().numpy() * 255
            image = image.astype(np.uint8)
            gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            # Calculate complexity for each patch
            complexity_map = patch_std(gray, patch_size)

            # Initialize mask with chessboard pattern for complexity map dimensions
            complexity_height, complexity_width = complexity_map.shape
            mask = np.zeros((complexity_height, complexity_width), dtype=bool)
            mask[::2, ::2] = 1
            mask[1::2, 1::2] = 1

            if complexity_based:
                if mask_ratio > 0.5:
                    # Mask an extra fraction of the currently unmasked patches,
                    # taking the lowest-complexity ones first. The fraction is
                    # relative to the unmasked half, so the total masked share
                    # of patches equals mask_ratio.
                    additional_masking_ratio = (mask_ratio - 0.5) / 0.5
                    complexity_threshold = np.quantile(complexity_map[~mask], additional_masking_ratio)
                    additional_mask = complexity_map <= complexity_threshold
                    mask[~mask] = additional_mask[~mask]
                else:
                    # Unmask a fraction of the currently masked patches, revealing
                    # the highest-complexity ones first, so the total masked share
                    # of patches equals mask_ratio.
                    unmasking_ratio = (0.5 - mask_ratio) / 0.5
                    complexity_threshold = np.quantile(complexity_map[mask], 1 - unmasking_ratio)
                    unmask = complexity_map >= complexity_threshold
                    mask[mask] = ~unmask[mask]

            # Apply mask to the original image based on complexity map
            for i in range(complexity_height):
                for j in range(complexity_width):
                    if mask[i, j]:
                        image[i * patch_size:(i + 1) * patch_size, j * patch_size:(j + 1) * patch_size] = 0

            # Convert image back to PyTorch format
            image = image.astype(np.float32) / 255.0
            masked_images[b] = torch.from_numpy(image).permute(2, 0, 1)

    else:  # mask_ratio == 0.5: plain chessboard masking, no complexity weighting
        B, C, H, W = images.shape
        masked_images = images.clone()

        pattern = np.tile(np.array([[0, 1] * (W // (2 * patch_size)), [1, 0] * (W // (2 * patch_size))]),
                          (H // (2 * patch_size), 1))

        for b in range(B):
            image = images[b].permute(1, 2, 0).cpu().numpy() * 255
            image = image.astype(np.uint8)

            # Apply masking
            mask = np.repeat(np.repeat(pattern, patch_size, axis=0), patch_size, axis=1)
            image[mask == 0] = 0  # Apply chessboard pattern masking

            # Convert back to PyTorch format
            image = image.astype(np.float32) / 255.0
            masked_images[b] = torch.from_numpy(image).permute(2, 0, 1)

    return masked_images
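
A usage sketch (mine, not part of the original post): mask a random batch and check the fraction of zeroed pixels. At mask_ratio=0.5 the plain chessboard zeroes half the patches; at 0.75 with complexity_based=True, half of the remaining patches (the lowest-complexity ones) are zeroed as well.

_batch = torch.rand(2, 3, 32, 32)
_m50 = mask_patches_chessboard(_batch, patch_size=2, mask_ratio=0.5)
print(_m50.shape, (_m50 == 0).float().mean().item())   # torch.Size([2, 3, 32, 32]), ~0.5
_m75 = mask_patches_chessboard(_batch, patch_size=2, mask_ratio=0.75, complexity_based=True)
print((_m75 == 0).float().mean().item())               # ~0.75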


class Encoder(nn.Module):
    def __init__(self, latent_dim):
        super(Encoder, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1),  # Output: [batch, 32, 16, 16]
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),  # Output: [batch, 64, 8, 8]
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),  # Output: [batch, 128, 4, 4]
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(4*4*128, self.latent_dim),
        )

    def forward(self, x):
        return self.encoder(x)


class Decoder(nn.Module):
    def __init__(self, latent_dim):
        super(Decoder, self).__init__()
        self.latent_dim = latent_dim
        self.decoder = nn.Sequential(
            nn.Linear(self.latent_dim, 4*4*128),
            nn.ReLU(),
            nn.Unflatten(1, (128, 4, 4)),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),  # Output: [batch, 64, 8, 8]
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),  # Output: [batch, 32, 16, 16]
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 3, kernel_size=4, stride=2, padding=1),  # Output: [batch, 3, 32, 32]
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.decoder(x)
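
A quick shape check (not in the post) confirming the commented layer shapes for CIFAR-10-sized inputs:

_enc, _dec = Encoder(latent_dim=64), Decoder(latent_dim=64)
_z = _enc(torch.randn(8, 3, 32, 32))
print(_z.shape)          # torch.Size([8, 64])
print(_dec(_z).shape)    # torch.Size([8, 3, 32, 32])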


class Autoencoder(nn.Module):
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = Encoder(latent_dim)
        self.decoder = Decoder(latent_dim)

    def AWGN(self, x, SNRdB):
        # Normalize the latent vector to unit L2 norm, so the signal power is 1.
        normalized_tensor = F.normalize(x, dim=1)

        # Convert SNR from dB to a linear ratio.
        SNR = 10.0 ** (SNRdB / 10.0)

        # Per-dimension noise std: total noise power latent_dim * std^2 = 1 / SNR.
        std = 1 / math.sqrt(self.latent_dim * SNR)
        n = torch.normal(0, std, size=normalized_tensor.size()).to(device)

        return normalized_tensor + n


    def forward(self, x, SNRdB):

        encoded = self.encoder(x)
        channel_output = self.AWGN(encoded, SNRdB)
        decoded = self.decoder(channel_output)

        return decoded
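
The noise std in AWGN follows from the unit-power constraint: after F.normalize each latent vector has squared norm 1, and a per-dimension noise variance of 1/(latent_dim·SNR) gives a total noise power of 1/SNR, hence the requested SNR. A quick empirical check of mine (not in the post):

_model = Autoencoder(latent_dim=512).to(device)
_z = torch.randn(4, 512).to(device)
_signal = F.normalize(_z, dim=1)
_noise = _model.AWGN(_z, SNRdB=10.0) - _signal
print(10 * torch.log10(_signal.pow(2).sum(dim=1) / _noise.pow(2).sum(dim=1)))
# each entry should land near 10 dB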

def preprocess_and_save_dataset(dataset, root_dir, patch_size, mask_ratio):
    # Mask every image once, offline, and cache (masked, original) pairs to disk
    # so the training loop does not repeat the complexity computation each epoch.
    os.makedirs(root_dir, exist_ok=True)
    for i, (image, _) in tqdm(enumerate(dataset), total=len(dataset)):
        masked_image = mask_patches_chessboard(image.unsqueeze(0), patch_size, mask_ratio, complexity_based=True)

        torch.save({
            'masked_images': masked_image.squeeze(0),
            'original_images': image
        }, os.path.join(root_dir, f'data_{i}.pt'))


class PreprocessedCIFAR10Dataset(Dataset):
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.file_paths = [os.path.join(root_dir, f) for f in os.listdir(root_dir) if f.endswith('.pt')]

    def __len__(self):
        return len(self.file_paths)

    def __getitem__(self, idx):
        file_path = self.file_paths[idx]
        data = torch.load(file_path)
        masked_images = data['masked_images']
        original_images = data['original_images']

        if self.transform:
            masked_images = self.transform(masked_images)
            original_images = self.transform(original_images)

        return masked_images, original_images
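
A hypothetical usage check (the folder name follows the pattern built in __main__ below; adjust it to whatever actually exists on disk):

_ds = PreprocessedCIFAR10Dataset(root_dir='ProcessedTrain(PS=2_MR=0.5)')
_masked, _original = _ds[0]
print(len(_ds), _masked.shape, _original.shape)
# e.g. 50000 torch.Size([3, 32, 32]) torch.Size([3, 32, 32])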


def train(latent_dim, patch_size, mask_ratio, trainloader, testloader):

    for snr_i in range(len(params['SNR'])) :

        model = Autoencoder(latent_dim=latent_dim).to(device)
        print("Model size : {}".format(count_parameters(model)))
        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=params['LR'])

        min_test_cost = float('inf')
        epochs_no_improve = 0
        n_epochs_stop = 20

        print("+++++ SNR = {} Training Start! +++++\t".format(params['SNR'][snr_i]))

        max_psnr = 0
        previous_best_model_path = None

        for epoch in range(params['EP']):
            # ========================================== Train ==========================================
            train_loss = 0.0

            model.train()
            timetemp = time.time()

            for masked_images, original_images in trainloader:

                original_images = original_images.to(device)
                masked_images = masked_images.to(device)

                optimizer.zero_grad()
                outputs = model(masked_images, SNRdB = params['SNR'][snr_i])

                loss = criterion(original_images, outputs)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()

            train_cost = train_loss / len(trainloader)
            # PSNR = 10 * log10(MAX^2 / MSE); pixels lie in [0, 1], so MAX = 1
            tr_psnr = round(10 * math.log10(1.0 / train_cost), 3)

            # ========================================================================

            test_loss = 0.0

            model.eval()

            with torch.no_grad():
                for masked_images, original_images in testloader:

                    original_images = original_images.to(device)
                    masked_images = masked_images.to(device)
                    outputs = model(masked_images, SNRdB=params['SNR'][snr_i])
                    loss = criterion(original_images, outputs)
                    test_loss += loss.item()

            test_cost = test_loss / len(testloader)
            test_psnr = round(10 * math.log10(1.0 / test_cost), 3)

            # Check the early-stopping condition
            if test_cost < min_test_cost:
                min_test_cost = test_cost
                epochs_no_improve = 0
            else:
                epochs_no_improve += 1

            if epochs_no_improve == n_epochs_stop:
                print("Early stopping!")
                break  # stop training early

            training_time = time.time() - timetemp

            print(
                "[{:>3}-Epoch({:>5}sec.)]  PSNR(Train / Val) : {:>6.4f} / {:>6.4f}        Loss(Train / Val) : {:>5.5f} / {:>5.5f}".format(
                    epoch + 1, round(training_time, 2), tr_psnr, test_psnr, train_cost,  test_cost))


            if test_psnr > max_psnr:

                save_folder = 'trained_model'

                if not os.path.exists(save_folder):
                    os.makedirs(save_folder)
                previous_psnr = max_psnr
                max_psnr = test_psnr

                # Remove the previous best checkpoint, if one exists
                if previous_best_model_path is not None:
                    os.remove(previous_best_model_path)
                    print(f"Performance update!! {previous_psnr} to {max_psnr}")

                save_path = os.path.join(save_folder, f"CBS(PS={patch_size}_DIM={latent_dim}_MR={mask_ratio}_SNR={params['SNR'][snr_i]}_PSNR={max_psnr}).pt")
                torch.save(model, save_path)
                print(f"Saved new best model at {save_path}")

                previous_best_model_path = save_path


            '''
            plt.figure(figsize=(12, 6))
            plt.subplot(1, 2, 1)
            plt.imshow(original_images[0].cpu().permute(1, 2, 0))
            plt.title('Original Image')
            plt.subplot(1, 2, 2)
            plt.imshow(masked_images[0].cpu().permute(1, 2, 0))
            plt.title('Masked Image')
            plt.show()
            '''



if __name__ == '__main__':

    for ps_i in range(len(params['PS'])):
        for dim_i in range(len(params['DIM'])):
            for mr_i in range(len(params['MR'])):

                Processed_train_path = "ProcessedTrain(PS=" + str(params['PS'][ps_i]) + "_MR=" + str(params['MR'][mr_i]) + ")"
                Processed_test_path  = "ProcessedTest(PS=" + str(params['PS'][ps_i]) + "_MR=" + str(params['MR'][mr_i]) + ")"

                if not os.path.exists(Processed_train_path):
                    transform = transforms.Compose([transforms.ToTensor()])
                    train_cifar10 = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
                    preprocess_and_save_dataset(train_cifar10, Processed_train_path, patch_size=params['PS'][ps_i], mask_ratio=params['MR'][mr_i])

                if not os.path.exists(Processed_test_path):
                    transform = transforms.Compose([transforms.ToTensor()])
                    test_cifar10 = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
                    preprocess_and_save_dataset(test_cifar10, Processed_test_path, patch_size=params['PS'][ps_i], mask_ratio=params['MR'][mr_i])

                traindataset = PreprocessedCIFAR10Dataset(root_dir=Processed_train_path)
                testdataset  = PreprocessedCIFAR10Dataset(root_dir=Processed_test_path)

                trainloader = DataLoader(traindataset, batch_size=params['BS'], shuffle=True, num_workers=4)
                testloader = DataLoader(testdataset, batch_size=params['BS'], shuffle=True, num_workers=4)

                train(params['DIM'][dim_i], params['PS'][ps_i], params['MR'][mr_i], trainloader, testloader)
