Post list: 2024/08 (20)
UOMOP
% Data input
SNR = [40, 30, 20, 10, 0];
deep = [26.248, 25.997, 24.505, 21.318, 17.638];
Ori = [25.945, 25.63, 24.138, 21.506, 17.799];
MR_33 = [25.107, 24.855, 23.646, 21.461, 18.165];
MR_65 = [24.548, 24.33, 23.257, 21.058, 17.79];
MR_75 = [23.362, 23.221, 22.419, 20.533, 17.56];

% Plot
figure;
hold on;
plot(SNR, Ori, '-s', 'DisplayName', 'Lv.1 transmission (Original image)', 'LineWidth', 2);
plot(SNR, MR..
% Data input
SNR = [40, 30, 20, 10, 0];
deep = [30.021, 29.227, 26.817, 22.801, 18.781];
Ori = [28.866, 28.233, 25.812, 23.185, 18.894];
MR_33 = [27.473, 27.001, 25.179, 22.885, 19.197];
MR_65 = [25.509, 25.141, 23.906, 21.745, 17.84];
MR_75 = [23.894, 23.696, 22.952, 21.731, 18.413];

% Plot
figure;
hold on;
plot(SNR, Ori, '-s', 'DisplayName', 'Lv.1 transmission (Original image)', 'LineWidth', 2);
plot(SNR,..
import torchvision.transforms as transforms
import math
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import time
import os
from tqdm import tqdm
import numpy as np
import math
import torch
import torchvision
from fractions import Fraction
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functi..
import torch
import numpy as np
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
import torch
import numpy as np
import matplotlib.pyplot as plt
from torchvision import datasets, transforms

def patch_importance(image, patch_size=2, type='variance', how_many=2):
    if isinstance(image, torch.Tensor):..
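Only the signature of patch_importance is visible in this preview. As a rough illustration, a minimal variance-based patch scoring function could look like the sketch below; everything beyond the (image, patch_size, type, how_many) signature is an assumption, and the name patch_importance_sketch is hypothetical.

import torch

def patch_importance_sketch(image, patch_size=2, type='variance', how_many=2):
    # Accept a torch.Tensor in (C, H, W) or (H, W) layout; average channels.
    if isinstance(image, torch.Tensor):
        img = image.float()
    else:
        img = torch.as_tensor(image, dtype=torch.float32)
    if img.dim() == 3:
        img = img.mean(dim=0)                      # (H, W) grayscale proxy

    H, W = img.shape
    scores = torch.zeros(H // patch_size, W // patch_size)
    for i in range(0, H - patch_size + 1, patch_size):
        for j in range(0, W - patch_size + 1, patch_size):
            patch = img[i:i + patch_size, j:j + patch_size]
            if type == 'variance':
                score = patch.var()                # texture-rich patches score high
            else:
                score = patch.abs().mean()         # fallback: mean intensity
            scores[i // patch_size, j // patch_size] = score

    # Flattened indices of the 'how_many' highest-scoring patches.
    top_idx = torch.topk(scores.flatten(), how_many).indices
    return scores, top_idx

For example, patch_importance_sketch(torch.rand(3, 32, 32)) returns a 16x16 score map for a CIFAR-10-sized image when patch_size=2.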
DIM=256  : [17.638, 21.318, 24.505, 25.997, 26.248]
DIM=512  : [18.781, 22.801, 26.817, 29.227, 30.021]
DIM=1024 : [19.958, 23.363, 28.337, 30.97, 33.79]
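These three result lines match the "deep" arrays in the MATLAB snippets above (in reversed order), so they are presumably PSNR values in dB measured at SNR = [0, 10, 20, 30, 40] dB for latent dimensions 256/512/1024. Under that assumption, a quick matplotlib sketch to compare them side by side:

import matplotlib.pyplot as plt

# Assumption: PSNR (dB) per SNR point, one curve per latent dimension.
snr = [0, 10, 20, 30, 40]
results = {
    'DIM=256':  [17.638, 21.318, 24.505, 25.997, 26.248],
    'DIM=512':  [18.781, 22.801, 26.817, 29.227, 30.021],
    'DIM=1024': [19.958, 23.363, 28.337, 30.97, 33.79],
}

plt.figure()
for label, psnr in results.items():
    plt.plot(snr, psnr, '-o', label=label, linewidth=2)
plt.xlabel('SNR (dB)')
plt.ylabel('PSNR (dB)')
plt.legend()
plt.grid(True)
plt.show()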
import torch
import torch.nn as nn

class Encoder(nn.Module):
    def __init__(self, latent_dim):
        super(Encoder, self).__init__()
        self.latent_dim = latent_dim
        self.in1 = nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=0)
        self.in2 = nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=0)
        self.in3 = nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=0)
        self.in4 ..
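The Encoder preview is cut off after its first convolution layers, but the visible ones are all nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=0). A tiny self-contained check of what one such layer does to a CIFAR-10-sized input, as an illustration rather than the post's full model:

import torch
import torch.nn as nn

# With padding=0 and stride=1, the spatial size shrinks by kernel_size - 1:
# a 32x32 CIFAR-10 image becomes 28x28 after a single 5x5 convolution.
conv = nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=0)
x = torch.randn(1, 3, 32, 32)          # dummy batch of one image
print(conv(x).shape)                   # torch.Size([1, 32, 28, 28])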