Python源码示例:torch.nn.utils.spectral_norm()
示例1
def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1,
             activation=F.relu, downsample=False):
    """Residual block whose convolutions are wrapped in spectral norm.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        h_ch: hidden (bottleneck) channel count; defaults to ``in_ch``.
        ksize: kernel size of the two main convolutions.
        pad: padding of the two main convolutions.
        activation: activation callable applied between convolutions.
        downsample: whether the block spatially downsamples.
    """
    super(Block, self).__init__()
    self.activation = activation
    self.downsample = downsample
    # A learned 1x1 shortcut is needed whenever the channel count
    # changes or the block downsamples.
    self.learnable_sc = (in_ch != out_ch) or downsample
    # BUG FIX: the original code overwrote any caller-supplied h_ch with
    # out_ch (`else: h_ch = out_ch`), silently ignoring the argument.
    # Respect an explicit h_ch and only default it when omitted.
    if h_ch is None:
        h_ch = in_ch
    self.c1 = utils.spectral_norm(nn.Conv2d(in_ch, h_ch, ksize, 1, pad))
    self.c2 = utils.spectral_norm(nn.Conv2d(h_ch, out_ch, ksize, 1, pad))
    if self.learnable_sc:
        self.c_sc = utils.spectral_norm(nn.Conv2d(in_ch, out_ch, 1, 1, 0))
    self._initialize()
示例2
def __init__(self, num_features, num_classes=0, activation=F.relu):
    """SN-ResNet projection discriminator with six residual stages.

    When ``num_classes > 0`` a spectrally-normalized class embedding is
    added for the projection term.
    """
    super(SNResNetProjectionDiscriminator, self).__init__()
    self.num_features = num_features
    self.num_classes = num_classes
    self.activation = activation
    self.block1 = OptimizedBlock(3, num_features)
    # Channel multipliers per stage: 1 -> 2 -> 4 -> 8 -> 16 -> 16,
    # every stage downsampling. Construction order matches the original.
    mults = [1, 2, 4, 8, 16, 16]
    for stage in range(1, 6):
        setattr(self, 'block%d' % (stage + 1),
                Block(num_features * mults[stage - 1],
                      num_features * mults[stage],
                      activation=activation, downsample=True))
    self.l7 = utils.spectral_norm(nn.Linear(num_features * 16, 1))
    # Projection head: label embedding combined with features downstream.
    if num_classes > 0:
        self.l_y = utils.spectral_norm(
            nn.Embedding(num_classes, num_features * 16))
    self._initialize()
示例3
def __init__(self, num_features, num_classes, activation=F.relu,
             dim_emb=128):
    """Concat-style SN-ResNet discriminator.

    The label embedding (``dim_emb`` channels) is concatenated to the
    feature map before ``block4``, hence its widened input width.
    """
    super(SNResNetConcatDiscriminator, self).__init__()
    self.num_features = num_features
    self.num_classes = num_classes
    self.dim_emb = dim_emb
    self.activation = activation
    # Stem plus the first two downsampling residual blocks.
    self.block1 = OptimizedBlock(3, num_features)
    self.block2 = Block(num_features, num_features * 2,
                        activation=activation, downsample=True)
    self.block3 = Block(num_features * 2, num_features * 4,
                        activation=activation, downsample=True)
    # Spectrally-normalized label embedding (created here to keep the
    # original parameter-registration order).
    if num_classes > 0:
        self.l_y = utils.spectral_norm(nn.Embedding(num_classes, dim_emb))
    # Remaining stages; block4's input includes the embedding channels.
    tail = [
        ('block4', num_features * 4 + dim_emb, num_features * 8, True),
        ('block5', num_features * 8, num_features * 16, True),
        ('block6', num_features * 16, num_features * 16, False),
    ]
    for attr, cin, cout, down in tail:
        setattr(self, attr,
                Block(cin, cout, activation=activation, downsample=down))
    self.l7 = utils.spectral_norm(nn.Linear(num_features * 16, 1))
    self._initialize()
示例4
def __init__(self, num_features=64, num_classes=0, activation=F.relu):
    """SN-ResNet projection discriminator, five-stage (64-feature) variant."""
    super(SNResNetProjectionDiscriminator, self).__init__()
    self.num_features = num_features
    self.num_classes = num_classes
    self.activation = activation
    self.block1 = OptimizedBlock(3, num_features)
    # Widths grow 1 -> 2 -> 4 -> 8 -> 16 with downsampling at each stage.
    mults = [1, 2, 4, 8, 16]
    for stage in range(1, 5):
        setattr(self, 'block%d' % (stage + 1),
                Block(num_features * mults[stage - 1],
                      num_features * mults[stage],
                      activation=activation, downsample=True))
    self.l6 = utils.spectral_norm(nn.Linear(num_features * 16, 1))
    # Projection term: class embedding matched to the final feature width.
    if num_classes > 0:
        self.l_y = utils.spectral_norm(
            nn.Embedding(num_classes, num_features * 16))
    self._initialize()
示例5
def __init__(self, depth=4):
    """Convolutional energy model built from ``depth`` SN conv stages."""
    super(ConvEnergy, self).__init__()
    self.preprocess = nn.Conv2d(3, _next(0), 1)
    self.blocks = nn.ModuleList(
        spectral_norm(nn.Conv2d(_next(idx), _next(idx + 1), 3, padding=1))
        for idx in range(depth)
    )
    # NOTE(review): this is a plain Python list — if `upscale` returns
    # nn.Modules their parameters are NOT registered with this module;
    # nn.ModuleList may be intended. Kept as-is to preserve behavior.
    self.project = [upscale(_next(idx + 1)) for idx in range(depth)]
    # Despite the name, these are per-stage activations, not batch norms.
    self.bn = nn.ModuleList(nn.ReLU() for _ in range(depth))
    self.postprocess = spectral_norm(nn.Conv2d(_next(depth), 128, 1))
    self.predict = spectral_norm(nn.Linear(128, 10))
示例6
def __init__(self, in_size, out_size, latent_size,
             hidden_size=None, upsample=1,
             normalization=tsn.AdaptiveBatchNorm,
             activation=func.relu):
    """BigGAN generator residual block (1x1 -> 3x3 -> 3x3 -> 1x1 convs).

    Each convolution is spectrally normalized and preceded by a
    latent-conditioned normalization layer.
    """
    super(BigGANBlock, self).__init__()
    if hidden_size is None:
        hidden_size = in_size // 4  # bottleneck at a quarter of the input width
    self.in_size = in_size
    self.out_size = out_size
    self.upsample = upsample
    # One conditional norm per conv; only the first sees in_size channels.
    self.bn = nn.ModuleList(
        normalization(in_size if idx == 0 else hidden_size, latent_size)
        for idx in range(4)
    )
    conv_shapes = [
        (in_size, hidden_size, 1),
        (hidden_size, hidden_size, 3),
        (hidden_size, hidden_size, 3),
        (hidden_size, out_size, 1),
    ]
    # padding = ksize // 2 gives 0 for 1x1 and 1 for 3x3, as before.
    self.blocks = nn.ModuleList(
        spectral_norm(nn.Conv2d(cin, cout, ksize, padding=ksize // 2))
        for cin, cout, ksize in conv_shapes
    )
    self.activation = activation
示例7
def __init__(self, in_size, out_size,
             hidden_size=None, downsample=2,
             activation=func.relu):
    """BigGAN discriminator residual block (1x1 -> 3x3 -> 3x3 -> 1x1)."""
    super(BigGANDiscriminatorBlock, self).__init__()
    if hidden_size is None:
        hidden_size = out_size
    self.in_size = in_size
    self.out_size = out_size
    self.hidden_size = hidden_size
    self.downsample = downsample
    conv_shapes = [
        (in_size, hidden_size, 1),
        (hidden_size, hidden_size, 3),
        (hidden_size, hidden_size, 3),
        (hidden_size, out_size, 1),
    ]
    # padding = ksize // 2 reproduces the original 0 / 1 paddings.
    self.blocks = nn.ModuleList(
        spectral_norm(nn.Conv2d(cin, cout, ksize, padding=ksize // 2))
        for cin, cout, ksize in conv_shapes
    )
    self.activation = activation
    # When widening, the shortcut only produces the extra channels (the
    # originals are concatenated elsewhere); otherwise no projection.
    if out_size > in_size:
        self.project = spectral_norm(
            nn.Conv2d(in_size, out_size - in_size, 1))
    else:
        self.project = lambda x: None
示例8
def __init__(self, fin, fout, fhidden=None, is_bias=True):
    """Gated residual block with spectral norm on every convolution.

    Args:
        fin: input channel count.
        fout: output channel count.
        fhidden: hidden channel count; defaults to ``min(fin, fout)``.
        is_bias: whether the second convolution uses a bias term.
    """
    super(GatedResnetBlock, self).__init__()
    # Attributes
    self.is_bias = is_bias
    # A learned 1x1 shortcut is needed only when channels change.
    self.learned_shortcut = (fin != fout)
    self.fin = fin
    self.fout = fout
    if fhidden is None:
        self.fhidden = min(fin, fout)
    else:
        self.fhidden = fhidden
    # (Removed the unused local `norm_layer = 'instance'` — it was
    # assigned but never read.)
    # Submodules
    self.conv_0 = spectral_norm(
        nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1))
    self.conv_1 = spectral_norm(
        nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1,
                  bias=is_bias))
    if self.learned_shortcut:
        self.conv_s = spectral_norm(
            nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0,
                      bias=False))
示例9
def __init__(self, fin, fout, fhidden=None, is_bias=True, use_sn=False):
    """Residual block whose convolutions are optionally spectrally normalized.

    ``use_sn`` toggles wrapping every convolution in ``spectral_norm``;
    the layer shapes are identical either way.
    """
    super().__init__()
    # Attributes
    self.is_bias = is_bias
    self.learned_shortcut = fin != fout
    self.fin = fin
    self.fout = fout
    self.fhidden = min(fin, fout) if fhidden is None else fhidden
    # Collapse the duplicated branches: wrap with spectral_norm when
    # requested, otherwise apply the identity.
    wrap = spectral_norm if use_sn else (lambda module: module)
    # Submodules
    self.conv_0 = wrap(
        nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1))
    self.conv_1 = wrap(
        nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1,
                  bias=is_bias))
    if self.learned_shortcut:
        self.conv_s = wrap(
            nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0,
                      bias=False))
示例10
def turn_on_spectral_norm(self):
    """
    private helper for turning on the spectral normalization
    :return: None (has side effect)
    """
    from torch.nn.utils import spectral_norm
    # Guard against double application when the mode flag is tracked.
    if self.spectral_norm_mode is not None:
        assert self.spectral_norm_mode is False, \
            "can't apply spectral_norm. It is already applied"
    # Wrap both convolutions of every layer in spectral norm.
    for module in self.layers:
        for attr in ('conv_1', 'conv_2'):
            setattr(module, attr, spectral_norm(getattr(module, attr)))
    # toggle the state variable:
    self.spectral_norm_mode = True
示例11
def turn_off_spectral_norm(self):
    """
    private helper for turning off the spectral normalization
    :return: None (has side effect)
    """
    from torch.nn.utils import remove_spectral_norm
    # Refuse to remove what was never applied (when the flag is tracked).
    if self.spectral_norm_mode is not None:
        assert self.spectral_norm_mode is True, \
            "can't remove spectral_norm. It is not applied"
    # Strip spectral norm from both convolutions of every layer.
    for module in self.layers:
        for attr in ('conv_1', 'conv_2'):
            remove_spectral_norm(getattr(module, attr))
    # toggle the state variable:
    self.spectral_norm_mode = False
示例12
def turn_on_spectral_norm(self):
    """
    private helper for turning on the spectral normalization
    :return: None (has side effect)
    """
    from torch.nn.utils import spectral_norm
    # Only proceed when spectral norm is not already active.
    if self.spectral_norm_mode is not None:
        assert self.spectral_norm_mode is False, \
            "can't apply spectral_norm. It is already applied"
    # Re-bind each layer's convolutions through the spectral-norm wrapper.
    for layer in self.layers:
        layer.conv_1 = spectral_norm(layer.conv_1)
        layer.conv_2 = spectral_norm(layer.conv_2)
    # Record that spectral norm is now applied.
    self.spectral_norm_mode = True
示例13
def turn_off_spectral_norm(self):
    """
    private helper for turning off the spectral normalization
    :return: None (has side effect)
    """
    from torch.nn.utils import remove_spectral_norm
    # Only proceed when spectral norm is currently active.
    if self.spectral_norm_mode is not None:
        assert self.spectral_norm_mode is True, \
            "can't remove spectral_norm. It is not applied"
    # Unhook the spectral-norm wrapper from each layer's convolutions.
    for layer in self.layers:
        remove_spectral_norm(layer.conv_1)
        remove_spectral_norm(layer.conv_2)
    # Record that spectral norm is no longer applied.
    self.spectral_norm_mode = False
示例14
def __init__(self, in_channels, out_channels, kernel_size, padding, stride,
             num_classes=0, activation=nn.LeakyReLU(0.2), conv_groups=1):
    """Generator block: SN transposed conv followed by (conditional) BN.

    :param in_channels: channels into the transposed convolution
    :param out_channels: channels out of the transposed convolution
    :param kernel_size: transposed-conv kernel size
    :param padding: transposed-conv padding
    :param stride: transposed-conv stride
    :param num_classes: >0 enables class-conditional batch norm
    :param activation: activation module applied by the forward pass
    :param conv_groups: group count for the transposed convolution
    """
    super(StandardGeneratorBlock, self).__init__()
    self.activation = activation
    self.num_classes = num_classes
    self.conv = spectral_norm(
        ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size,
                        padding=padding, stride=stride,
                        weight_init=Xavier_Uniform(), groups=conv_groups))
    # Conditional BN when labels are available, plain BN otherwise.
    self.bn = (ConditionalBatchNorm2d(out_channels, num_classes)
               if num_classes > 0 else BatchNorm2d(out_channels))
示例15
def __init__(self, in_channels, out_channels, kernel_size, padding, stride,
             activation=nn.LeakyReLU(0.2), use_bn=False, conv_groups=1):
    """Discriminator block: SN convolution with optional batch norm.

    :param in_channels: channels into the convolution
    :param out_channels: channels out of the convolution
    :param kernel_size: convolution kernel size
    :param padding: convolution padding
    :param stride: convolution stride
    :param activation: activation module applied by the forward pass
    :param use_bn: whether to apply batch normalization
    :param conv_groups: group count for the convolution
    """
    super(StandardDiscriminatorBlock, self).__init__()
    self.activation = activation
    self.conv = spectral_norm(
        Conv2d(in_channels, out_channels, kernel_size=kernel_size,
               padding=padding, stride=stride,
               weight_init=Xavier_Uniform(), groups=conv_groups))
    # Empty Sequential acts as the identity when BN is disabled.
    self.bn = (BatchNorm2d(out_channels, weight_init=Normal(1.0, 0.02))
               if use_bn else nn.Sequential())
示例16
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=None,
             bias=True,
             spectral_norm=False,
             residual_init=True):
    """Conv2d wrapper with 'same'-style default padding and optional SN.

    When ``padding`` is None it defaults to ``(kernel_size - 1) / 2``.
    NB: the ``spectral_norm`` *parameter* shadows torch's function, so the
    qualified ``utils.spectral_norm`` must be used below.
    """
    super(CustomConv2d, self).__init__()
    self.residual_init = residual_init
    if padding is None:
        padding = int((kernel_size - 1) / 2)
    conv = nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride=stride,
                     padding=padding,
                     bias=bias)
    # Wrap in spectral norm only on request.
    self.conv = utils.spectral_norm(conv) if spectral_norm else conv
示例17
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             spectral_norm=False):
    """Optimized first residual block: conv, conv+pool, and pooled shortcut.

    ``spectral_norm`` is forwarded to all three sub-convolutions.
    """
    super(OptimizedResidualBlock, self).__init__()
    # Main path: plain conv, then conv followed by mean pooling.
    self.conv1 = CustomConv2d(
        in_channels, out_channels,
        kernel_size=kernel_size, spectral_norm=spectral_norm)
    self.conv2 = ConvMeanPool(
        out_channels, out_channels,
        kernel_size=kernel_size, spectral_norm=spectral_norm)
    # Shortcut: pool first, then a 1x1 conv without residual init.
    self.conv_shortcut = MeanPoolConv(
        in_channels, out_channels,
        kernel_size=1, spectral_norm=spectral_norm, residual_init=False)
    self.relu2 = nn.ReLU()
示例18
def __init__(self):
    """VGG-style 128x128 discriminator, all layers spectrally normalized.

    Conv pairs alternate 3x3/stride-1 (keep size) and 4x4/stride-2
    (halve size): 128 -> 64 -> 32 -> 16 -> 8 -> 4 spatially, while the
    channel count grows 64 -> 128 -> 256 -> 512 -> 512.
    """
    super(Discriminator_VGG_128_SN, self).__init__()
    self.lrelu = nn.LeakyReLU(0.2, True)
    # (in, out) channels for conv0..conv9; even indices are the 3x3
    # stride-1 convs, odd indices the 4x4 stride-2 downsamplers.
    channel_pairs = [
        (3, 64), (64, 64),
        (64, 128), (128, 128),
        (128, 256), (256, 256),
        (256, 512), (512, 512),
        (512, 512), (512, 512),
    ]
    for idx, (cin, cout) in enumerate(channel_pairs):
        ksize, stride = (3, 1) if idx % 2 == 0 else (4, 2)
        setattr(self, 'conv%d' % idx,
                spectral_norm(nn.Conv2d(cin, cout, ksize, stride, 1)))
    # classifier head over the final 4x4x512 feature map
    self.linear0 = spectral_norm(nn.Linear(512 * 4 * 4, 100))
    self.linear1 = spectral_norm(nn.Linear(100, 1))
示例19
def __init__(self, in_ch, out_ch, ksize=3, pad=1, activation=F.relu):
    """First ("optimized") residual block: two SN convs plus an SN 1x1 shortcut."""
    super(OptimizedBlock, self).__init__()
    self.activation = activation
    sn = utils.spectral_norm
    self.c1 = sn(nn.Conv2d(in_ch, out_ch, ksize, 1, pad))
    self.c2 = sn(nn.Conv2d(out_ch, out_ch, ksize, 1, pad))
    # 1x1 shortcut conv (stride 1, no padding).
    self.c_sc = sn(nn.Conv2d(in_ch, out_ch, 1, 1, 0))
    self._initialize()
示例20
def __init__(self, size=64, z=32):
    """Dilated encoder-decoder generator with SN convs and learned noise."""
    super().__init__()
    self.z = z
    self.preprocess = spectral_norm(nn.Conv2d(5 + 3, size, 3, padding=1))
    # Learned per-stage noise tensors for encoder (6) and post (3) stages.
    self.noise = nn.Parameter(torch.rand(6, 1, size, 1, 1))
    self.post_noise = nn.Parameter(torch.rand(3, 1, size, 1, 1))
    # NOTE(review): the 64 here is hard-coded; it presumably should track
    # `size` — confirm against the forward pass before changing.
    self.bg = nn.Parameter(torch.randn(1, 64, 8, 8))
    self.color = nn.Conv2d(size, 3, 1)
    # Encoder: SN convs with dilation (and padding) growing 1..6.
    self.encoder = nn.ModuleList(
        spectral_norm(nn.Conv2d(size, size, 3, dilation=d, padding=d))
        for d in range(1, 7)
    )
    self.encoder_norm = nn.ModuleList(
        nn.InstanceNorm2d(size) for _ in range(6)
    )
    # Decoder mirrors the encoder: dilation shrinking 6..1, doubled input
    # channels (skip connections concatenated).
    self.decoder = nn.ModuleList(
        nn.Conv2d(2 * size, size, 3, dilation=d, padding=d)
        for d in range(6, 0, -1)
    )
    self.decoder_norm = nn.ModuleList(
        AdaptiveInstanceNormPP(size, z) for _ in range(6)
    )
    # Post-processing: three plain 3x3 convs with adaptive norms.
    self.post = nn.ModuleList(
        nn.Conv2d(size, size, 3, dilation=1, padding=1) for _ in range(3)
    )
    self.post_norm = nn.ModuleList(
        AdaptiveInstanceNormPP(size, z) for _ in range(3)
    )
示例21
def __init__(self):
    """Energy model: a single learned scalar plus an SN-MLP classifier."""
    super().__init__()
    # Free scalar parameter used as the energy term.
    self.energy = nn.Parameter(torch.randn(1))
    self.predict = MLP(28 * 28, 10, 128, depth=3,
                       normalization=spectral_norm, batch_norm=False)
示例22
def __init__(self):
    """Conditional energy: image and label encoders feeding a scalar head."""
    super(Energy, self).__init__()
    # Flattened-image encoder (784 -> 128) with spectral norm throughout.
    self.input = MLP(28 * 28, 128, hidden_size=128, depth=3,
                     batch_norm=False, normalization=spectral_norm)
    # Label/condition encoder (10 -> 128).
    self.condition = MLP(10, 128, depth=3, batch_norm=False,
                         normalization=spectral_norm)
    # Combines the two codes into a single energy value.
    self.combine = MLP(128, 1, hidden_size=64, depth=3,
                       batch_norm=False, normalization=spectral_norm)
示例23
def __init__(self, depth=4):
    """Conditional convolutional energy over 1-channel (MNIST-sized) images."""
    super(ConvEnergy, self).__init__()
    self.preprocess = nn.Conv2d(1, 32, 1)
    # `depth` spectrally-normalized 3x3 conv stages at a fixed 32 channels.
    self.blocks = nn.ModuleList(
        spectral_norm(nn.Conv2d(32, 32, 3, padding=1)) for _ in range(depth)
    )
    self.bn = nn.ModuleList(
        nn.GroupNorm(8, 32) for _ in range(depth)
    )
    self.postprocess = nn.Conv2d(32, 128, 1)
    # Label/condition encoder and scalar energy head.
    self.condition = MLP(10, 128, depth=3, batch_norm=False,
                         normalization=spectral_norm)
    self.combine = MLP(128, 1, hidden_size=64, depth=3, batch_norm=False,
                       normalization=spectral_norm)
示例24
def __init__(self, single, size=5, latents=64):
    """Set encoder producing a ``latents``-dim Gaussian posterior.

    ``single`` encodes individual elements; ``weight`` scores them for
    aggregation; ``mean``/``logvar`` parameterize the posterior.
    """
    super(Encoder, self).__init__()
    self.size = size
    self.single = single
    self.weight = spectral_norm(nn.Linear(128, 1))
    self.combine = MLP(
        128, 128, 64,
        depth=3, batch_norm=False,
        normalization=spectral_norm,
        activation=func.leaky_relu,
    )
    self.mean = spectral_norm(nn.Linear(128, latents))
    self.logvar = spectral_norm(nn.Linear(128, latents))
示例25
def __init__(self, sample=True):
    """Energy model pairing a single-item encoder with a set encoder."""
    super(Energy, self).__init__()
    self.sample = sample
    self.input = SingleEncoder()
    # The set encoder reuses the single-item encoder for its elements.
    self.condition = Encoder(self.input)
    self.input_process = spectral_norm(nn.Linear(128, 64))
    self.postprocess = spectral_norm(nn.Linear(64, 64, bias=False))
    self.combine = MLP(128, 1, hidden_size=64, depth=4, batch_norm=False,
                       normalization=spectral_norm,
                       activation=func.leaky_relu)
示例26
def __init__(self, latents=32):
    """Single-item encoder: flattened 28x28 image to a ``latents``-dim code."""
    super(SingleEncoder, self).__init__()
    self.block = MLP(28 * 28, latents, hidden_size=64, depth=4,
                     batch_norm=False, normalization=spectral_norm)
示例27
def __init__(self, single, size=5, latents=16):
    """Set encoder over 32-dim element codes with a ``latents``-dim posterior."""
    super(Encoder, self).__init__()
    self.size = size
    self.single = single
    # Attention-style scalar score per element code.
    self.weight = spectral_norm(nn.Linear(32, 1))
    self.combine = MLP(32, 32, 64, depth=3, batch_norm=False,
                       normalization=spectral_norm)
    # Gaussian posterior parameters.
    self.mean = spectral_norm(nn.Linear(32, latents))
    self.logvar = spectral_norm(nn.Linear(32, latents))
示例28
def __init__(self, sample=True):
    """Energy model over 32-dim single-item codes and 16-dim set codes."""
    super(Energy, self).__init__()
    self.sample = sample
    self.input = SingleEncoder()
    # Set encoder built on top of the single-item encoder.
    self.condition = Encoder(self.input)
    self.input_process = spectral_norm(nn.Linear(32, 64))
    self.postprocess = spectral_norm(nn.Linear(16, 64))
    self.combine = MLP(128, 1, hidden_size=64, depth=4, batch_norm=False,
                       normalization=spectral_norm)
示例29
def __init__(self, depth=4):
    """Fully-connected SN energy over flattened 28x28 inputs."""
    super(Connected, self).__init__()
    self.block = MLP(28 * 28, 1, depth=depth, batch_norm=False,
                     normalization=spectral_norm)
示例30
def __init__(self):
    """Classifier with a condition-driven feature modulation (scale/bias)."""
    super().__init__()
    # Image feature extractor (784 -> 64) with SN, no batch norm.
    self.process = MLP(28 * 28, 64, 128, depth=3,
                       normalization=spectral_norm, batch_norm=False)
    self.predict = MLP(64, 10, 128, depth=3, normalization=spectral_norm)
    # Scalar condition encoded to 128, then mapped to per-feature
    # scale and bias terms.
    self.condition = MLP(1, 128, 128, depth=3, normalization=spectral_norm)
    self.scale = nn.Linear(128, 64)
    self.bias = nn.Linear(128, 64)