Python source-code examples: chainer.links.Deconvolution2D()
示例1
def __init__(self):
    """Register the VGG16-style convolutions and the 32x upsampler of FCN-32s.

    NOTE(review): conv3_3 is absent although canonical VGG16 has it --
    presumably deliberate in this example; confirm against forward().
    """
    super(FCN_32s, self).__init__(
        # pad=100 leaves margin so the final score map can be cropped
        # back to the input size (standard FCN trick).
        conv1_1=L.Convolution2D(3, 64, 3, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3),
        conv2_1=L.Convolution2D(64, 128, 3),
        conv2_2=L.Convolution2D(128, 128, 3),
        conv3_1=L.Convolution2D(128, 256, 3),
        conv3_2=L.Convolution2D(256, 256, 3),
        conv4_1=L.Convolution2D(256, 512, 3),
        conv4_2=L.Convolution2D(512, 512, 3),
        conv4_3=L.Convolution2D(512, 512, 3),
        conv5_1=L.Convolution2D(512, 512, 3),
        conv5_2=L.Convolution2D(512, 512, 3),
        # "Fully connected" VGG layers expressed as convolutions.
        conv5_3=L.Convolution2D(512, 512, 3),
        fc6=L.Convolution2D(512, 4096, 7),
        fc7=L.Convolution2D(4096, 4096, 1),
        # 21 classes -- presumably PASCAL VOC (20 + background); confirm.
        score_fr=L.Convolution2D(4096, 21, 1),
        # ksize=64, stride=32: learned 32x upsampling of the score map.
        upsample=L.Deconvolution2D(21, 21, 64, 32),
    )
    self.train = True  # train/test mode flag; presumably read in forward()
示例2
def __init__(self, n_class=21):
    """Register the layers of FCN-32s (VGG16 backbone, ``n_class`` outputs)."""
    self.train=True  # train/test mode flag; presumably read in forward()
    super(FCN32s, self).__init__(
        # pad=100 leaves margin for cropping the score map to input size.
        conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
        conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
        conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
        conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
        conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
        conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        # "Fully connected" VGG layers expressed as convolutions.
        fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
        fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
        # Score layer starts from all-zero weights (standard FCN init).
        score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
            nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
        # 32x upsampling initialized to a bilinear kernel; ``f`` is a
        # project helper module -- not defined in this file.
        upscore=L.Deconvolution2D(n_class, n_class, 64, stride=32, pad=0,
            nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=64)),
    )
示例3
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN-style generator: latent vector -> dense map -> stacked deconvs.

    Args:
        n_hidden: dimensionality of the latent input z.
        bottom_width: spatial side length of the first feature map.
        ch: channel count of the first feature map.
        wscale: stddev of the Normal weight initializer.
    """
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        bottom_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, bottom_units,
                           initialW=init_w)
        # Each 4x4/stride-2 deconvolution doubles the spatial resolution
        # while halving the channel count.
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        # Final 3x3/stride-1 layer maps to 3 (RGB) channels, same resolution.
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(bottom_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
示例4
def __init__(self, isize, nc, ngf, conv_init=None, bn_init=None):
    """Build a DCGAN generator as a flat sequence of layers.

    Args:
        isize: target output image size; must be 4 doubled some number of
            times, otherwise the sizing loop below never terminates.
        nc: number of output image channels.
        ngf: base number of generator feature maps.
        conv_init: optional weight initializer for the deconvolutions.
        bn_init: optional initial gamma for the BatchNormalization layers.
    """
    # Grow the starting channel count so that halving it at every 2x
    # upsampling step lands back at ngf when the size reaches isize.
    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2
    layers = []
    # input is Z, going into a convolution
    # 1x1 -> 4x4 (ksize=4, stride=1, no padding); in_channels inferred.
    layers.append(L.Deconvolution2D(None, cngf, ksize=4, stride=1, pad=0, initialW=conv_init, nobias=True))
    layers.append(L.BatchNormalization(cngf, initial_gamma=bn_init))
    layers.append(ReLU())
    # Repeatedly double the spatial size and halve the channel count.
    csize, cndf = 4, cngf  # NOTE(review): cndf appears unused here -- confirm
    while csize < isize // 2:
        layers.append(L.Deconvolution2D(None, cngf // 2, ksize=4, stride=2, pad=1, initialW=conv_init, nobias=True))
        layers.append(L.BatchNormalization(cngf // 2, initial_gamma=bn_init))
        layers.append(ReLU())
        cngf = cngf // 2
        csize = csize * 2
    # Final 2x upsampling to nc channels, squashed by tanh.
    layers.append(L.Deconvolution2D(None, nc, ksize=4, stride=2, pad=1, initialW=conv_init, nobias=True))
    layers.append(Tanh())
    super(DCGAN_G, self).__init__(*layers)
示例5
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN-style generator: latent vector -> dense map -> stacked deconvs.

    Args:
        n_hidden: dimensionality of the latent input z.
        bottom_width: spatial side length of the first feature map.
        ch: channel count of the first feature map.
        wscale: stddev of the Normal weight initializer.
    """
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        bottom_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, bottom_units,
                           initialW=init_w)
        # Each 4x4/stride-2 deconvolution doubles the spatial resolution
        # while halving the channel count.
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        # Final 3x3/stride-1 layer maps to 3 (RGB) channels, same resolution.
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(bottom_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
示例6
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN-style generator: latent vector -> dense map -> stacked deconvs.

    Args:
        n_hidden: dimensionality of the latent input z.
        bottom_width: spatial side length of the first feature map.
        ch: channel count of the first feature map.
        wscale: stddev of the Normal weight initializer.
    """
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        bottom_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, bottom_units,
                           initialW=init_w)
        # Each 4x4/stride-2 deconvolution doubles the spatial resolution
        # while halving the channel count.
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        # Final 3x3/stride-1 layer maps to 3 (RGB) channels, same resolution.
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(bottom_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
示例7
def test_caffe_export_model(self):
    """Round-trip a small Conv/BN/Deconv/Linear chain through Caffe export.

    Passes when the re-imported model matches the original on ``self.x``
    (checked by the project helper ``assert_export_import_match``).
    """
    class Model(chainer.Chain):
        def __init__(self):
            super(Model, self).__init__()
            with self.init_scope():
                # in_channels=None: inferred at the first forward pass.
                self.l1 = L.Convolution2D(None, 1, 1, 1, 0, groups=1)
                self.b2 = L.BatchNormalization(1, eps=1e-2)
                self.l3 = L.Deconvolution2D(None, 1, 1, 1, 0, groups=1)
                self.l4 = L.Linear(None, 1)

        def forward(self, x):
            h = F.relu(self.l1(x))
            h = self.b2(h)
            h = self.l3(h)
            return self.l4(h)

    assert_export_import_match(Model(), self.x)
示例8
def create_link(self, initializers):
    """Build an ``L.Deconvolution2D`` from this test case's settings.

    Args:
        initializers: pair ``(initialW, initial_bias)`` of initializers.

    Returns:
        A configured ``chainer.links.Deconvolution2D``.
    """
    initialW, initial_bias = initializers
    # Pass initial_bias only when the link actually has a bias term.
    # This replaces two near-identical construction branches that
    # differed only in that one keyword argument.
    extra = {}
    if not self.nobias:
        extra['initial_bias'] = initial_bias
    return L.Deconvolution2D(
        self.in_channels, self.out_channels, self.ksize,
        stride=self.stride, pad=self.pad, nobias=self.nobias,
        dilate=self.dilate, groups=self.groups,
        initialW=initialW, **extra)
示例9
def test_deconvolution(self):
    """Check a grouped Deconvolution2D layer parsed from a Caffe net.

    Expects 3 groups of 4 input / 2 output channels each, with weights
    enumerated 0..95 group by group and biases 0..11.
    """
    self.init_func()
    self.assertEqual(len(self.func.layers), 1)
    f = self.func.l1
    self.assertIsInstance(f, links.Deconvolution2D)
    # Grouped deconvolution stores a block-diagonal weight layout:
    # group i occupies rows i*2..i*2+2 and columns i*4..i*4+4 of W.
    for i in range(3): # 3 == group
        in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
        out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
        w = f.W.data[out_slice, in_slice]
        numpy.testing.assert_array_equal(
            w.flatten(), range(i * 32, (i + 1) * 32))
    numpy.testing.assert_array_equal(
        f.b.data, range(12))
    # Calling the parsed function forwards the input to the mocked layer.
    self.call(['x'], ['y'])
    self.mock.assert_called_once_with(self.inputs[0])
示例10
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02,
             z_distribution="uniform", hidden_activation=F.relu, output_activation=F.tanh, use_bn=True):
    """Configurable DCGAN generator.

    Args:
        n_hidden: dimensionality of the latent input z.
        bottom_width: spatial side length of the first feature map.
        ch: channel count of the first feature map.
        wscale: stddev of the Normal weight initializer.
        z_distribution: name of the latent sampling distribution.
        hidden_activation: activation applied to hidden layers.
        output_activation: activation applied to the output layer.
        use_bn: whether BatchNormalization layers are created at all.
    """
    super(DCGANGenerator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    self.z_distribution = z_distribution
    self.hidden_activation = hidden_activation
    self.output_activation = output_activation
    self.use_bn = use_bn
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        bottom_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, bottom_units,
                           initialW=init_w)
        # 4x4/stride-2 deconvs double resolution and halve channels.
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        # Final 3x3/stride-1 layer maps to RGB at the same resolution.
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        if self.use_bn:
            self.bn0 = L.BatchNormalization(bottom_units)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)
示例11
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
    """DCGAN-style generator: latent vector -> dense map -> stacked deconvs.

    Args:
        n_hidden: dimensionality of the latent input z (default 128).
        bottom_width: spatial side length of the first feature map.
        ch: channel count of the first feature map.
        wscale: stddev of the Normal weight initializer.
    """
    super(Generator, self).__init__()
    self.n_hidden = n_hidden
    self.ch = ch
    self.bottom_width = bottom_width
    with self.init_scope():
        init_w = chainer.initializers.Normal(wscale)
        bottom_units = bottom_width * bottom_width * ch
        self.l0 = L.Linear(self.n_hidden, bottom_units,
                           initialW=init_w)
        # 4x4/stride-2 deconvs double resolution and halve channels.
        self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
        self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
        self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=init_w)
        # Final 3x3/stride-1 layer maps to RGB at the same resolution.
        self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
        self.bn0 = L.BatchNormalization(bottom_units)
        self.bn1 = L.BatchNormalization(ch // 2)
        self.bn2 = L.BatchNormalization(ch // 4)
        self.bn3 = L.BatchNormalization(ch // 8)
示例12
def __init__(self, z_slow_dim, z_fast_dim, out_channels, bottom_width,
        conv_ch=512, wscale=0.01):
    """Video generator with Uniform(wscale) weight initialization.

    Two latent inputs ("slow" and "fast") are each projected to half of
    the bottom feature map; four 4x4/stride-2 deconvolutions then
    upsample 16x, and a final 3x3 layer maps to ``out_channels``.
    """
    self.ch = conv_ch
    self.bottom_width = bottom_width
    # Each stream contributes half of the bottom_width^2 x conv_ch map.
    slow_mid_dim = bottom_width * bottom_width * conv_ch // 2
    fast_mid_dim = bottom_width * bottom_width * conv_ch // 2
    super(VideoGeneratorInitUniform, self).__init__()
    w = chainer.initializers.Uniform(wscale)
    with self.init_scope():
        self.l0s = L.Linear(z_slow_dim, slow_mid_dim, initialW=w, nobias=True)
        self.l0f = L.Linear(z_fast_dim, fast_mid_dim, initialW=w, nobias=True)
        # 4x4/stride-2 deconvs: each doubles the spatial resolution.
        self.dc1 = L.Deconvolution2D(conv_ch, conv_ch // 2, 4, 2, 1, initialW=w, nobias=True)
        self.dc2 = L.Deconvolution2D(conv_ch // 2, conv_ch // 4, 4, 2, 1, initialW=w, nobias=True)
        self.dc3 = L.Deconvolution2D(conv_ch // 4, conv_ch // 8, 4, 2, 1, initialW=w, nobias=True)
        self.dc4 = L.Deconvolution2D(conv_ch // 8, conv_ch // 16, 4, 2, 1, initialW=w, nobias=True)
        # Output layer keeps resolution; it is the only one with a bias.
        self.dc5 = L.Deconvolution2D(conv_ch // 16, out_channels, 3, 1, 1, initialW=w, nobias=False)
        self.bn0s = L.BatchNormalization(slow_mid_dim)
        self.bn0f = L.BatchNormalization(fast_mid_dim)
        self.bn1 = L.BatchNormalization(conv_ch // 2)
        self.bn2 = L.BatchNormalization(conv_ch // 4)
        self.bn3 = L.BatchNormalization(conv_ch // 8)
        self.bn4 = L.BatchNormalization(conv_ch // 16)
示例13
def __init__(self, z_slow_dim, z_fast_dim, out_channels, bottom_width,
        conv_ch=512, wscale=0.02):
    """Video generator using Chainer's default weight initialization.

    Same topology as the Uniform-init variant; ``initialW=None`` makes
    every link fall back to its built-in default initializer.
    NOTE(review): ``wscale`` is accepted but never used here -- confirm
    that is intentional.
    """
    self.ch = conv_ch
    self.bottom_width = bottom_width
    # Each stream contributes half of the bottom_width^2 x conv_ch map.
    slow_mid_dim = bottom_width * bottom_width * conv_ch // 2
    fast_mid_dim = bottom_width * bottom_width * conv_ch // 2
    super(VideoGeneratorInitDefault, self).__init__()
    w = None  # None selects each link's default initializer
    with self.init_scope():
        self.l0s = L.Linear(z_slow_dim, slow_mid_dim, initialW=w, nobias=True)
        self.l0f = L.Linear(z_fast_dim, fast_mid_dim, initialW=w, nobias=True)
        # 4x4/stride-2 deconvs: each doubles the spatial resolution.
        self.dc1 = L.Deconvolution2D(conv_ch, conv_ch // 2, 4, 2, 1, initialW=w, nobias=True)
        self.dc2 = L.Deconvolution2D(conv_ch // 2, conv_ch // 4, 4, 2, 1, initialW=w, nobias=True)
        self.dc3 = L.Deconvolution2D(conv_ch // 4, conv_ch // 8, 4, 2, 1, initialW=w, nobias=True)
        self.dc4 = L.Deconvolution2D(conv_ch // 8, conv_ch // 16, 4, 2, 1, initialW=w, nobias=True)
        # Output layer keeps resolution; it is the only one with a bias.
        self.dc5 = L.Deconvolution2D(conv_ch // 16, out_channels, 3, 1, 1, initialW=w, nobias=False)
        self.bn0s = L.BatchNormalization(slow_mid_dim)
        self.bn0f = L.BatchNormalization(fast_mid_dim)
        self.bn1 = L.BatchNormalization(conv_ch // 2)
        self.bn2 = L.BatchNormalization(conv_ch // 4)
        self.bn3 = L.BatchNormalization(conv_ch // 8)
        self.bn4 = L.BatchNormalization(conv_ch // 16)
示例14
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False) -> None:
    """Conv/deconv unit for encoder-decoder networks.

    ``sample='down'`` halves resolution (4x4 conv, stride 2),
    ``'up'`` doubles it (4x4 deconv, stride 2); any other value keeps
    resolution with a 1x1 convolution.
    """
    super().__init__()
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    w = chainer.initializers.Normal(0.02)
    # Pick the link factory by mode; default is the resolution-preserving 1x1.
    factories = {
        'down': lambda: L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w),
        'up': lambda: L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w),
    }
    make = factories.get(sample, lambda: L.Convolution2D(ch0, ch1, 1, 1, 0, initialW=w))
    with self.init_scope():
        self.c = make()
        if bn:
            self.batchnorm = L.BatchNormalization(ch1)
示例15
def __init__(self):
    """Image-transform network for fast neural style transfer.

    Two stride-2 downsampling convs, five residual blocks, then a
    mirrored pair of stride-2 deconvs back to RGB.
    NOTE(review): BN sizes b1..b5 (32,64,128,64,32) mirror the layer
    widths; which BN pairs with which layer is decided in forward() --
    confirm there.
    """
    super(FastStyleNet, self).__init__(
        # 9x9 stem keeps resolution; c2/c3 each halve it.
        c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
        c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
        c3=L.Convolution2D(64, 128, 4,stride=2, pad=1),
        r1=ResidualBlock(128, 128),
        r2=ResidualBlock(128, 128),
        r3=ResidualBlock(128, 128),
        r4=ResidualBlock(128, 128),
        r5=ResidualBlock(128, 128),
        # Upsampling mirror of c3/c2, then a 9x9 layer back to RGB.
        d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
        d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
        d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
        b1=L.BatchNormalization(32),
        b2=L.BatchNormalization(64),
        b3=L.BatchNormalization(128),
        b4=L.BatchNormalization(64),
        b5=L.BatchNormalization(32),
    )
示例16
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
    """Conv/deconv block whose geometry is selected by ``sample``.

    'down' halves resolution, 'up' doubles it via a deconvolution,
    'up-nn' uses the project's nearest-neighbour conv, 'none'/'none-5'
    keep resolution with 3x3 / 5x5 kernels. Any other value asserts.
    """
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    w = chainer.initializers.Normal(0.02)
    # One factory per mode; evaluation is deferred so only the chosen
    # link is ever constructed.
    factories = {
        'down': lambda: L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w),
        'up': lambda: L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w),
        'up-nn': lambda: NNConvolution2D(ch0, ch1, 2, 3, 1, 1, initialW=w),
        'none': lambda: L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=w),
        'none-5': lambda: L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w),
    }
    assert sample in factories, 'unknown sample {}'.format(sample)
    links = {'c': factories[sample]()}
    if bn:
        links['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**links)
示例17
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
    """Convolutional block whose kernel geometry is selected by ``mode``.

    Modes: 'none'/'none-7' keep resolution (3x3 / 7x7); 'down' halves it,
    'up' doubles it (4x4 stride-2 conv/deconv); 'full-down' is a 4x4
    valid conv; the 'frq*' modes use 1x9 kernels along the last axis,
    with 'frq-down'/'frq-up' also wrapping the activation in the
    project's _downsample/_upsample helpers; 'pad'/'trim' grow/shrink
    the map by one pixel per side. Unknown modes raise.

    Args:
        dr: dropout ratio; stored only -- presumably applied in
            forward(), confirm there.
    """
    super(ConvBlock, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.bn = bn
    self.dr = dr
    with self.init_scope():
        # nobias=bn throughout: the conv bias is redundant when a
        # BatchNormalization layer follows.
        if mode == 'none':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
        elif mode == 'none-7':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
        elif mode == 'down':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
        elif mode == 'up':
            self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
        elif mode == 'full-down':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
        elif mode == 'frq':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
        elif mode == 'frq-down':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
            self.activation = lambda x: activation(_downsample(x))
        elif mode == 'frq-up':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
            self.activation = lambda x: activation(_upsample(x))
        elif mode == 'pad':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
        elif mode == 'trim':
            self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
        else:
            raise Exception('mode is missing')
        if bn:
            self.b = L.BatchNormalization(out_channels)
示例18
def __init__(self, in_ch, out_ch, ksize, stride, pad, nobias=False, gain=np.sqrt(2), lrmul=1):
    """Deconvolution with an equalized learning rate (PGGAN/StyleGAN trick).

    Weights are drawn from N(0, 1/lrmul); the He-style constant
    ``gain * sqrt(1/in_ch) * lrmul`` is stored in ``inv_c`` to be
    applied at run time instead of being baked into the weights.
    """
    weight_init = chainer.initializers.Normal(1.0/lrmul) # equalized learning rate
    self.inv_c = gain * np.sqrt(1.0 / (in_ch)) * lrmul
    super(EqualizedDeconv2d, self).__init__()
    with self.init_scope():
        self.c = L.Deconvolution2D(in_ch, out_ch, ksize, stride, pad, initialW=weight_init, nobias=nobias)
示例19
def __init__(self, in_ch_dec, in_ch_enc, out_ch):
    """U-Net-style decoder block: 2x deconv upsample, then two 3x3 convs.

    c1 takes ``out_ch + in_ch_enc`` input channels: the upsampled
    decoder map is presumably concatenated with the encoder skip
    connection -- confirm in forward().
    """
    super(UpBlock, self).__init__()
    with self.init_scope():
        # ksize=2, stride=2: exact 2x spatial upsampling.
        self.d0 = L.Deconvolution2D(in_ch_dec, out_ch, 2, 2)
        self.b0 = L.BatchNormalization(out_ch)
        self.c1 = L.Convolution2D(out_ch + in_ch_enc, out_ch, 3, 1, 1)
        self.b1 = L.BatchNormalization(out_ch)
        self.c2 = L.Convolution2D(out_ch, out_ch, 3, 1, 1)
        self.b2 = L.BatchNormalization(out_ch)
示例20
def __init__(self, n_class=21):
    """Register the layers of FCN-16s (VGG16 backbone, ``n_class`` outputs)."""
    self.train=True  # train/test mode flag; presumably read in forward()
    super(FCN16s, self).__init__(
        # pad=100 leaves margin for cropping the score map to input size.
        conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
        conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
        conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
        conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
        conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
        conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        # "Fully connected" VGG layers expressed as convolutions.
        fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
        fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
        # Score layers start from all-zero weights (standard FCN init).
        score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
            nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
        score_pool4=L.Convolution2D(512, n_class, 1, stride=1, pad=0,
            nobias=True, initialW=np.zeros((n_class, 512, 1, 1))),
        # Bilinear-initialized 2x and 16x upsamplers; ``f`` is a project
        # helper module. use_cudnn is an old-Chainer link argument.
        upscore2=L.Deconvolution2D(n_class, n_class, 4, stride=2,
            nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=4), use_cudnn=False),
        upscore16=L.Deconvolution2D(n_class, n_class, 32, stride=16,
            nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=32), use_cudnn=False),
    )
示例21
def __init__(self, n_class=21):
    """Register the layers of FCN-8s (VGG16 backbone, ``n_class`` outputs)."""
    self.train = True  # train/test mode flag; presumably read in forward()
    super(FCN8s, self).__init__(
        # pad=100 leaves margin for cropping the score map to input size.
        conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
        conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
        conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
        conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
        conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
        conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
        # "Fully connected" VGG layers expressed as convolutions.
        fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
        fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
        # Score layers start from all-zero weights (standard FCN init).
        score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
            nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
        score_pool3=L.Convolution2D(256, n_class, 1, stride=1, pad=0,
            nobias=True, initialW=np.zeros((n_class, 256, 1, 1))),
        score_pool4=L.Convolution2D(512, n_class, 1, stride=1, pad=0,
            nobias=True, initialW=np.zeros((n_class, 512, 1, 1))),
        # Bilinear-initialized 2x/2x/8x upsamplers; ``f`` is a project
        # helper module. use_cudnn is an old-Chainer link argument.
        upscore2=L.Deconvolution2D(n_class, n_class, 4, stride=2,
            nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=4), use_cudnn=False),
        upscore_pool4=L.Deconvolution2D(n_class, n_class, 4, stride=2,
            nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=4), use_cudnn=False),
        upscore8=L.Deconvolution2D(n_class, n_class, 16, stride=8,
            nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=16), use_cudnn=False),
    )
示例22
def __init__(self, n_class, scales):
    """Mask prediction head (Mask R-CNN style).

    Four 3x3 conv+activation layers, a 2x deconvolution upsample, and a
    1x1 conv producing one mask map per class. in_channels is omitted
    from every link, so Chainer infers it at the first forward pass.
    """
    super(MaskHead, self).__init__()
    initialW = HeNormal(1, fan_option='fan_out')
    with self.init_scope():
        # Conv2DActiv(out_channels=256, ksize=3): resolution-preserving.
        self.conv1 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv2 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv3 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv4 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        # out_channels=256, ksize=2, stride=2: doubles the RoI map size.
        self.conv5 = L.Deconvolution2D(
            256, 2, pad=0, stride=2, initialW=initialW)
        # Per-class mask logits (out_channels=n_class, ksize=1).
        self.seg = L.Convolution2D(n_class, 1, pad=0, initialW=initialW)
    self._n_class = n_class
    self._scales = scales
示例23
def __init__(self):
    """DCGAN generator: 6x6 base map, four 2x deconvs up to RGB output.

    Uses the module-level ``nz`` (latent size). The ``wscale`` keyword
    belongs to the old (pre-v2) Chainer link API.
    NOTE(review): bn0 (size 512) has no matching 512-wide output here
    besides the reshaped l0z map -- confirm its use in forward().
    """
    super(Generator, self).__init__(
        l0z = L.Linear(nz, 6*6*512, wscale=0.02*math.sqrt(nz)),
        # Each 4x4/stride-2 deconv doubles resolution, halving channels.
        dc1 = L.Deconvolution2D(512, 256, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*512)),
        dc2 = L.Deconvolution2D(256, 128, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*256)),
        dc3 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*128)),
        dc4 = L.Deconvolution2D(64, 3, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*64)),
        bn0l = L.BatchNormalization(6*6*512),
        bn0 = L.BatchNormalization(512),
        bn1 = L.BatchNormalization(256),
        bn2 = L.BatchNormalization(128),
        bn3 = L.BatchNormalization(64),
    )
示例24
def __init__(self):
    """DCGAN generator: 6x6 base map, four 2x deconvs up to RGB output.

    Uses the module-level ``nz`` (latent size). The ``wscale`` keyword
    belongs to the old (pre-v2) Chainer link API.
    NOTE(review): bn0 (size 512) has no matching 512-wide output here
    besides the reshaped l0z map -- confirm its use in forward().
    """
    super(Generator, self).__init__(
        l0z = L.Linear(nz, 6*6*512, wscale=0.02*math.sqrt(nz)),
        # Each 4x4/stride-2 deconv doubles resolution, halving channels.
        dc1 = L.Deconvolution2D(512, 256, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*512)),
        dc2 = L.Deconvolution2D(256, 128, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*256)),
        dc3 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*128)),
        dc4 = L.Deconvolution2D(64, 3, 4, stride=2, pad=1, wscale=0.02*math.sqrt(4*4*64)),
        bn0l = L.BatchNormalization(6*6*512),
        bn0 = L.BatchNormalization(512),
        bn1 = L.BatchNormalization(256),
        bn2 = L.BatchNormalization(128),
        bn3 = L.BatchNormalization(64),
    )
示例25
def __init__(self,
             dim2,
             classes,
             out_size,
             bn_eps):
    """Segmentation decoder: two decode blocks, a 1x1 score conv, 2x upsamplers.

    Args:
        dim2: channel count of the stage-2 encoder feature map.
        classes: number of segmentation classes.
        out_size: final (H, W) output size, or None for size-agnostic blocks.
        bn_eps: epsilon for the BatchNormalization layers.
    """
    super(SBDecoder, self).__init__()
    with self.init_scope():
        # Decode blocks operate at 1/8 and 1/4 of the final resolution.
        self.decode1 = SBDecodeBlock(
            channels=classes,
            out_size=((out_size[0] // 8, out_size[1] // 8) if out_size else None),
            bn_eps=bn_eps)
        self.decode2 = SBDecodeBlock(
            channels=classes,
            out_size=((out_size[0] // 4, out_size[1] // 4) if out_size else None),
            bn_eps=bn_eps)
        # 1x1 projection of the encoder feature map to class scores.
        self.conv3c = conv1x1_block(
            in_channels=dim2,
            out_channels=classes,
            bn_eps=bn_eps,
            activation=(lambda: L.PReLU(shape=(classes,))))
        # Learned 2x upsampling of the class map (ksize=2, stride=2).
        self.output = L.Deconvolution2D(
            in_channels=classes,
            out_channels=classes,
            ksize=2,
            stride=2,
            pad=0,
            # output_pad=0,
            nobias=True)
        # Non-learned 2x interpolation companion.
        self.up = InterpolationBlock(scale_factor=2)
示例26
def __init__(self,
             in_channels,
             out_channels,
             ksize,
             stride,
             pad,
             dilate=1,
             groups=1,
             use_bias=False,
             use_bn=True,
             bn_eps=1e-5,
             activation=(lambda: F.relu),
             **kwargs):
    """Deconvolution block: Deconvolution2D -> optional BN -> optional activation.

    Args:
        activation: zero-argument factory returning the activation (or
            None to disable); resolved via the project's
            get_activation_layer helper.
        use_bias: whether the deconvolution keeps its bias term
            (dropped by default since BN typically follows).
    """
    super(DeconvBlock, self).__init__(**kwargs)
    self.activate = (activation is not None)
    self.use_bn = use_bn
    with self.init_scope():
        self.conv = L.Deconvolution2D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=(not use_bias),
            dilate=dilate,
            groups=groups)
        if self.use_bn:
            self.bn = L.BatchNormalization(
                size=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
示例27
def __init__(self, ch0, ch1, use_bn=True,
             sample='down', activation=F.relu, dropout=False):
    """Conv(+BN) unit: 'down' halves resolution with a 4x4 stride-2
    convolution; any other ``sample`` doubles it with a 4x4 stride-2
    deconvolution."""
    self.use_bn = use_bn
    self.activation = activation
    self.dropout = dropout
    init_w = chainer.initializers.Normal(0.02)
    super(ConvBNR, self).__init__()
    with self.init_scope():
        # Same geometry either way; only the link class differs.
        link_cls = L.Convolution2D if sample == 'down' else L.Deconvolution2D
        self.c = link_cls(ch0, ch1, 4, 2, 1, initialW=init_w)
        if use_bn:
            self.bn = L.BatchNormalization(ch1)
示例28
def added(self, link):
    """Hook callback invoked when this hook is attached to ``link``.

    Define axis and register ``u`` if the weight is initialized:
    validates that the link owns the named weight, sets the
    normalization axis to 1 for deconvolution links (their W stores
    output channels on the second axis), and prepares parameters
    immediately only if the weight array already exists -- presumably
    deferred otherwise; confirm in the hook's other callbacks.

    Raises:
        ValueError: if the link has no attribute named ``weight_name``.
    """
    if not hasattr(link, self.weight_name):
        raise ValueError(
            'Weight \'{}\' does not exist!'.format(self.weight_name))
    if isinstance(link, (L.Deconvolution2D, L.DeconvolutionND)):
        self.axis = 1
    if getattr(link, self.weight_name).array is not None:
        self._prepare_parameters(link)
示例29
def __init__(self, n_class=21):
    """FCN-16s over VGG16 (init_scope-style Chainer API).

    All learned layers start from zero weights/biases; the two
    upsampling deconvolutions are initialized with fixed bilinear
    kernels and carry no bias.
    """
    self.n_class = n_class
    # Zero init for every conv layer (standard FCN initialization).
    kwargs = {
        'initialW': chainer.initializers.Zero(),
        'initial_bias': chainer.initializers.Zero(),
    }
    super(FCN16s, self).__init__()
    with self.init_scope():
        # pad=100 leaves margin for cropping the score map to input size.
        self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **kwargs)
        self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **kwargs)
        self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **kwargs)
        self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        # "Fully connected" VGG layers expressed as convolutions.
        self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **kwargs)
        self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **kwargs)
        self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **kwargs)
        self.score_pool4 = L.Convolution2D(512, n_class, 1, 1, 0, **kwargs)
        # 2x (ksize=4, stride=2) and 16x (ksize=32, stride=16) bilinear
        # upsamplers; ``initializers`` is a project helper module.
        self.upscore2 = L.Deconvolution2D(
            n_class, n_class, 4, 2, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())
        self.upscore16 = L.Deconvolution2D(
            n_class, n_class, 32, 16, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())
示例30
def __init__(self, n_class=21):
    """FCN-32s over VGG16 (init_scope-style Chainer API).

    All learned layers start from zero weights/biases; the single 32x
    upsampling deconvolution is initialized with a fixed bilinear
    kernel and carries no bias.
    """
    self.n_class = n_class
    # Zero init for every conv layer (standard FCN initialization).
    kwargs = {
        'initialW': chainer.initializers.Zero(),
        'initial_bias': chainer.initializers.Zero(),
    }
    super(FCN32s, self).__init__()
    with self.init_scope():
        # pad=100 leaves margin for cropping the score map to input size.
        self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **kwargs)
        self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **kwargs)
        self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **kwargs)
        self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **kwargs)
        self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **kwargs)
        self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
        self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **kwargs)
        self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
        # "Fully connected" VGG layers expressed as convolutions.
        self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **kwargs)
        self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **kwargs)
        self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **kwargs)
        # 32x bilinear upsampler (ksize=64, stride=32);
        # ``initializers`` is a project helper module.
        self.upscore = L.Deconvolution2D(
            n_class, n_class, 64, 32, 0, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())