List: DE (34)
UOMOP
class Decoder(nn.Module):
    def __init__(self, latent_dim):
        super(Decoder, self).__init__()
        self.latent_dim = latent_dim
        self.linear = nn.Linear(self.latent_dim, 2048)
        self.prelu = nn.PReLU()
        self.unflatten = nn.Unflatten(1, (32, 8, 8))
        self.essen = nn.ConvTranspose2d(32, 16, kernel_size=5, stride=2, padding=1, output_padding=1)
        self.in6 = nn.Co..
import torch
import torch.nn as nn

class Encoder(nn.Module):
    def __init__(self, latent_dim):
        super(Encoder, self).__init__()
        self.latent_dim = latent_dim
        self.in1 = nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=1)
        self.in2 = nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=1)
        self.in3 = nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=1)
        self.in4 ..
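The two previews above sketch a convolutional autoencoder for CIFAR-10 with a configurable latent dimension, but both are cut off. Below is a minimal self-contained sketch of how such an Encoder/Decoder pair can be wired up for 32x32x3 inputs; the layer sizes, strides, and activations here are assumptions for illustration, not the post's exact architecture.

import torch
import torch.nn as nn

class Encoder(nn.Module):
    def __init__(self, latent_dim):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),   # 32x32 -> 16x16
            nn.PReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),  # 16x16 -> 8x8
            nn.PReLU(),
        )
        self.flatten = nn.Flatten()
        self.linear = nn.Linear(32 * 8 * 8, latent_dim)

    def forward(self, x):
        return self.linear(self.flatten(self.conv(x)))

class Decoder(nn.Module):
    def __init__(self, latent_dim):
        super().__init__()
        self.linear = nn.Linear(latent_dim, 32 * 8 * 8)
        self.unflatten = nn.Unflatten(1, (32, 8, 8))
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),  # 8x8 -> 16x16
            nn.PReLU(),
            nn.ConvTranspose2d(16, 3, kernel_size=3, stride=2, padding=1, output_padding=1),   # 16x16 -> 32x32
            nn.Sigmoid(),  # inputs come from ToTensor(), i.e. values in [0, 1]
        )

    def forward(self, z):
        return self.deconv(self.unflatten(self.linear(z)))

# Quick shape check
x = torch.randn(4, 3, 32, 32)
z = Encoder(64)(x)
print(z.shape)               # torch.Size([4, 64])
print(Decoder(64)(z).shape)  # torch.Size([4, 3, 32, 32])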
import math
import torch
import torchvision
from fractions import Fraction
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as f
import matplotlib.pyplot as plt
import torchvision.transforms as tr
from torchvision import datasets
from torch.utils.data import DataLoader, Dataset
import time
import os
from skimage.metrics import structural_similarity as ssim
from pa..
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms

def patch_importance(image, patch_size=2, type='variance', how_many=2, noise_scale=0):
    if isinstance(image, torch.Tensor):
        image = image.numpy()
    H, W = image.shape[-2:]
    extended_patch_size = patch_size + 2 * how_many
    value_map = np.zeros((H // patch_size, W // patch_size))
    for i in ra..
import torchvision.transforms as transforms
import math
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import time
# from params import *
import os
from tqdm import tqdm
import numpy as np

def patch_importance(image, patch_size=2, type='variance', how_many=2):
    if isinstance(image, torch.Tensor):
        image = image.numpy(..
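Both previews above start a patch_importance function that scores each patch_size x patch_size patch of an image, extending the patch by how_many pixels on every side before computing a statistic, but the loop body is cut off. A hedged completion under those assumptions (the choice of statistics and the optional noise term are guesses, not the post's code) might look like this:

import numpy as np
import torch

def patch_importance(image, patch_size=2, type='variance', how_many=2, noise_scale=0):
    # Assumes H and W are divisible by patch_size (true for 32x32 CIFAR-10 images).
    if isinstance(image, torch.Tensor):
        image = image.numpy()
    H, W = image.shape[-2:]
    value_map = np.zeros((H // patch_size, W // patch_size))
    for i in range(0, H, patch_size):
        for j in range(0, W, patch_size):
            # Extend the patch by `how_many` pixels on every side, clipped to the image.
            top = max(i - how_many, 0)
            left = max(j - how_many, 0)
            bottom = min(i + patch_size + how_many, H)
            right = min(j + patch_size + how_many, W)
            patch = image[..., top:bottom, left:right]
            if type == 'variance':
                score = np.var(patch)
            elif type == 'mean':
                score = np.mean(patch)
            else:
                raise ValueError(f"unknown importance type: {type}")
            # Optional jitter so ties between equally flat patches break randomly.
            if noise_scale:
                score += np.random.normal(scale=noise_scale)
            value_map[i // patch_size, j // patch_size] = score
    return value_map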
import torch
import torchvision
import torchvision.transforms as tr
import numpy as np
import matplotlib.pyplot as plt
from torchvision import datasets
from torch.utils.data import DataLoader
from skimage.feature import canny
from skimage.measure import shannon_entropy
import cv2

# Transform setup: convert images to tensors (normalization to [0, 1])
transf = tr.Compose([tr.ToTensor()])

# Load the CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(..
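This last preview loads CIFAR-10 and imports canny and shannon_entropy, which suggests scoring images by visual complexity. A minimal sketch of that idea, with the specific metrics (Canny edge-pixel fraction and grayscale Shannon entropy) assumed rather than taken from the post:

import numpy as np
import torchvision
import torchvision.transforms as tr
from torch.utils.data import DataLoader
from skimage.feature import canny
from skimage.measure import shannon_entropy
import cv2

transf = tr.Compose([tr.ToTensor()])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transf)
loader = DataLoader(trainset, batch_size=1, shuffle=False)

for idx, (img, label) in enumerate(loader):
    # CHW float tensor in [0, 1] -> HWC uint8 array for OpenCV / skimage.
    rgb = (img[0].permute(1, 2, 0).numpy() * 255).astype(np.uint8)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    edge_density = canny(gray / 255.0, sigma=1.0).mean()  # fraction of edge pixels
    entropy = shannon_entropy(gray)                        # entropy of the grayscale image
    print(f"image {idx}: edge_density={edge_density:.3f}, entropy={entropy:.3f}")
    if idx >= 4:  # only inspect a few images in this sketch
        break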