Python source code examples: chainer.links.Convolution2D()
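
Before the project examples, here is a minimal, self-contained sketch of the link's basic call pattern (written for this page as an illustration; the channel counts and shapes are arbitrary):

import numpy as np
import chainer.links as L

# in_channels may be None, in which case it is inferred from the first input;
# ksize=3 with pad=1 and stride=1 keeps the spatial size unchanged.
conv = L.Convolution2D(None, 16, ksize=3, stride=1, pad=1)

x = np.zeros((8, 3, 32, 32), dtype=np.float32)  # (batch, channels, height, width)
y = conv(x)                                     # parameters are allocated lazily here
print(y.shape)                                  # (8, 16, 32, 32)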

Example 1
def __init__(self, ch):
        super(Link_Convolution2D, self).__init__(L.Convolution2D(None, None))

        # code.InteractiveConsole({'ch': ch}).interact()

        self.ksize = size2d(ch.ksize)
        self.stride = size2d(ch.stride)
        ps = size2d(ch.pad)
        self.pads = ps + ps

        if ch.b is not None:
            # a bias term exists (the link was created with nobias=False)
            self.M = ch.b.shape[0]
            self.b = helper.make_tensor_value_info(
                '/b', TensorProto.FLOAT, [self.M])
        else:
            # nobias=True: there is no bias tensor to read the channel count from
            self.M = "TODO"
            self.b = None

        self.W = helper.make_tensor_value_info(
            '/W', TensorProto.FLOAT,
            [self.M, 'channel_size'] + list(self.ksize)) 
Example 2
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
        super(ResBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
        self.learnable_sc = in_channels != out_channels
        self.dr = dr
        self.bn = bn
        with self.init_scope():
            self.c1 = L.Convolution2D(in_channels,  out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            if bn:
                self.b1 = L.BatchNormalization(out_channels)
                self.b2 = L.BatchNormalization(out_channels)
            if self.learnable_sc:
                self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc) 
Example 3
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.relu, mode='none', bn=True, dr=None):
        super(ResBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
        self.learnable_sc = in_channels != out_channels
        self.dr = dr
        self.bn = bn
        with self.init_scope():
            self.c1 = L.Convolution1D(in_channels,  out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            self.c2 = L.Convolution1D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            if bn:
                self.b1 = L.BatchNormalization(out_channels)
                self.b2 = L.BatchNormalization(out_channels)
            if self.learnable_sc:
                self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc) 
Example 4
def __init__(self, n_actions, max_episode_steps):
        super().__init__()
        with self.init_scope():
            self.embed = L.EmbedID(max_episode_steps + 1, 3136)
            self.image2hidden = chainerrl.links.Sequence(
                L.Convolution2D(None, 32, 8, stride=4),
                F.relu,
                L.Convolution2D(None, 64, 4, stride=2),
                F.relu,
                L.Convolution2D(None, 64, 3, stride=1),
                functools.partial(F.reshape, shape=(-1, 3136)),
            )
            self.hidden2out = chainerrl.links.Sequence(
                L.Linear(None, 512),
                F.relu,
                L.Linear(None, n_actions),
                DiscreteActionValue,
            ) 
Example 5
def __init__(self, n_actions, n_input_channels=4,
                 activation=F.relu, bias=0.1):
        self.n_actions = n_actions
        self.n_input_channels = n_input_channels
        self.activation = activation

        super().__init__()
        with self.init_scope():
            self.conv_layers = chainer.ChainList(
                L.Convolution2D(n_input_channels, 32, 8, stride=4,
                                initial_bias=bias),
                L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
                L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias))

            self.a_stream = MLP(3136, n_actions, [512])
            self.v_stream = MLP(3136, 1, [512]) 
Example 6
def init_like_torch(link):
    # Mimic torch's default parameter initialization
    # TODO(muupan): Use chainer's initializers when it is merged
    for l in link.links():
        if isinstance(l, L.Linear):
            out_channels, in_channels = l.W.shape
            stdv = 1 / np.sqrt(in_channels)
            l.W.array[:] = np.random.uniform(-stdv, stdv, size=l.W.shape)
            if l.b is not None:
                l.b.array[:] = np.random.uniform(-stdv, stdv, size=l.b.shape)
        elif isinstance(l, L.Convolution2D):
            out_channels, in_channels, kh, kw = l.W.shape
            stdv = 1 / np.sqrt(in_channels * kh * kw)
            l.W.array[:] = np.random.uniform(-stdv, stdv, size=l.W.shape)
            if l.b is not None:
                l.b.array[:] = np.random.uniform(-stdv, stdv, size=l.b.shape) 
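
For context, a minimal way the helper above can be applied. The MyChain class is a hypothetical stand-in written for this page; init_like_torch is the function defined above:

import numpy as np
import chainer
import chainer.links as L

class MyChain(chainer.Chain):
    # hypothetical two-layer chain; every link is fully specified so that
    # its parameters are already allocated when init_like_torch runs
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(3, 16, 3, pad=1)
            self.fc = L.Linear(16 * 8 * 8, 10)

model = MyChain()
init_like_torch(model)  # overwrites W and b in place with torch-style uniform values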
Example 7
def __init__(self, n_layers, n_vocab, embed_size, hidden_size, dropout=0.1):
        hidden_size //= 3  # integer division: the channel counts below must stay ints
        super(CNNEncoder, self).__init__(
            embed=L.EmbedID(n_vocab, embed_size, ignore_label=-1,
                            initialW=embed_init),
            cnn_w3=L.Convolution2D(
                embed_size, hidden_size, ksize=(3, 1), stride=1, pad=(2, 0),
                nobias=True),
            cnn_w4=L.Convolution2D(
                embed_size, hidden_size, ksize=(4, 1), stride=1, pad=(3, 0),
                nobias=True),
            cnn_w5=L.Convolution2D(
                embed_size, hidden_size, ksize=(5, 1), stride=1, pad=(4, 0),
                nobias=True),
            mlp=MLP(n_layers, hidden_size * 3, dropout)
        )
        self.output_size = hidden_size * 3
        self.dropout = dropout 
Example 8
def __init__(self):
        super(FCN_32s, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, pad=100),
            conv1_2=L.Convolution2D(64, 64, 3),
            conv2_1=L.Convolution2D(64, 128, 3),
            conv2_2=L.Convolution2D(128, 128, 3),
            conv3_1=L.Convolution2D(128, 256, 3),
            conv3_2=L.Convolution2D(256, 256, 3),
            conv4_1=L.Convolution2D(256, 512, 3),
            conv4_2=L.Convolution2D(512, 512, 3),
            conv4_3=L.Convolution2D(512, 512, 3),
            conv5_1=L.Convolution2D(512, 512, 3),
            conv5_2=L.Convolution2D(512, 512, 3),
            conv5_3=L.Convolution2D(512, 512, 3),
            fc6=L.Convolution2D(512, 4096, 7),
            fc7=L.Convolution2D(4096, 4096, 1),
            score_fr=L.Convolution2D(4096, 21, 1),
            upsample=L.Deconvolution2D(21, 21, 64, 32),
        )
        self.train = True 
Example 9
def __init__(self):
        super(VGG_multi, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=1),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),

            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),

            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),

            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),

            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),

            fc6=L.Linear(2048, 4096),
            fc7=L.Linear(4096, 4096),
            fc8=L.Linear(4096, 768),
        )
        self.train = True 
Example 10
def __init__(self):
        super(VGG_single, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=1),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),

            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),

            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),

            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),

            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),

            fc6=L.Linear(2048, 4096),
            fc7=L.Linear(4096, 4096),
            fc8=L.Linear(4096, 256),
        )
        self.train = True 
Example 11
def init_like_torch(link):
    # Mimic torch's default parameter initialization
    # TODO(muupan): Use chainer's initializers when it is merged
    for l in link.links():
        if isinstance(l, L.Linear):
            out_channels, in_channels = l.W.data.shape
            stdv = 1 / np.sqrt(in_channels)
            l.W.data[:] = np.random.uniform(-stdv, stdv, size=l.W.data.shape)
            if l.b is not None:
                l.b.data[:] = np.random.uniform(-stdv, stdv,
                                                size=l.b.data.shape)
        elif isinstance(l, L.Convolution2D):
            out_channels, in_channels, kh, kw = l.W.data.shape
            stdv = 1 / np.sqrt(in_channels * kh * kw)
            l.W.data[:] = np.random.uniform(-stdv, stdv, size=l.W.data.shape)
            if l.b is not None:
                l.b.data[:] = np.random.uniform(-stdv, stdv,
                                                size=l.b.data.shape) 
Example 12
def __init__(self):
        super(Mix, self).__init__()

        enc_ch = [3, 64, 256, 512, 1024, 2048]
        ins_ch = [6, 128, 384, 640, 2176, 3072]

        # plain Python lists do not register links as parameters,
        # so each conv/bn pair is registered explicitly via add_link below
        self.conv = [None] * 6
        self.bn = [None] * 6
        for i in range(1, 6):
            c = L.Convolution2D(enc_ch[i] + ins_ch[i], enc_ch[i], 1, nobias=True)
            b = L.BatchNormalization(enc_ch[i])

            self.conv[i] = c
            self.bn[i] = b

            self.add_link('c{}'.format(i), c)
            self.add_link('b{}'.format(i), b) 
Example 13
def __init__(self, out_ch):
        super(Decoder, self).__init__()

        with self.init_scope():
            self.mix = Mix()

            self.bot1 = BottleNeckB(2048, 1024)
            self.bot2 = BottleNeckB(2048, 1024)
            self.bot3 = BottleNeckB(2048, 1024)

            self.b5 = UpBlock(2048, 1024, 1024)
            self.b4 = UpBlock(1024, 512, 512)
            self.b3 = UpBlock(512, 256, 256)
            self.b2 = UpBlock(256, 64, 128)
            self.b1 = UpBlock(128, 3 + (6 + 3 * 13), 64)

            self.last_b = L.BatchNormalization(64)
            self.last_c = L.Convolution2D(64, out_ch * 2, 1, nobias=True) 
Example 14
def __init__(self, n_class=21):
        self.train=True
        super(FCN32s, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
            fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
            score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
                nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
            upscore=L.Deconvolution2D(n_class, n_class, 64, stride=32, pad=0,
                nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=64)),
        ) 
Example 15
def __init__(self, in_size, ch, out_size, stride=2, groups=1):
        super(BottleNeckA, self).__init__()
        initialW = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True,
                groups=groups)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(out_size)

            self.conv4 = L.Convolution2D(
                in_size, out_size, 1, stride, 0,
                initialW=initialW, nobias=True)
            self.bn4 = L.BatchNormalization(out_size) 
Example 16
def __init__(self, in_channels, out1, proj3, out3, proj5, out5, proj_pool,
                 conv_init=None, bias_init=None):
        super(Inception, self).__init__()
        with self.init_scope():
            self.conv1 = convolution_2d.Convolution2D(
                in_channels, out1, 1, initialW=conv_init,
                initial_bias=bias_init)
            self.proj3 = convolution_2d.Convolution2D(
                in_channels, proj3, 1, initialW=conv_init,
                initial_bias=bias_init)
            self.conv3 = convolution_2d.Convolution2D(
                proj3, out3, 3, pad=1, initialW=conv_init,
                initial_bias=bias_init)
            self.proj5 = convolution_2d.Convolution2D(
                in_channels, proj5, 1, initialW=conv_init,
                initial_bias=bias_init)
            self.conv5 = convolution_2d.Convolution2D(
                proj5, out5, 5, pad=2, initialW=conv_init,
                initial_bias=bias_init)
            self.projp = convolution_2d.Convolution2D(
                in_channels, proj_pool, 1, initialW=conv_init,
                initial_bias=bias_init) 
Example 17
def __init__(self):
        super(GoogLeNet, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(None,  64, 7, stride=2, pad=3)
            self.conv2_reduce = L.Convolution2D(None,  64, 1)
            self.conv2 = L.Convolution2D(None, 192, 3, stride=1, pad=1)
            # From here on, Inception refers to the Inception class defined above (in place of L.Inception)
            self.inc3a = Inception(None,  64,  96, 128, 16,  32,  32)
            self.inc3b = Inception(None, 128, 128, 192, 32,  96,  64)
            self.inc4a = Inception(None, 192,  96, 208, 16,  48,  64)
            self.inc4b = Inception(None, 160, 112, 224, 24,  64,  64)
            self.inc4c = Inception(None, 128, 128, 256, 24,  64,  64)
            self.inc4d = Inception(None, 112, 144, 288, 32,  64,  64)
            self.inc4e = Inception(None, 256, 160, 320, 32, 128, 128)
            self.inc5a = Inception(None, 256, 160, 320, 32, 128, 128)
            self.inc5b = Inception(None, 384, 192, 384, 48, 128, 128)
            self.loss3_fc = L.Linear(None, 1000)

            self.loss1_conv = L.Convolution2D(None, 128, 1)
            self.loss1_fc1 = L.Linear(None, 1024)
            self.loss1_fc2 = L.Linear(None, 1000)

            self.loss2_conv = L.Convolution2D(None, 128, 1)
            self.loss2_fc1 = L.Linear(None, 1024)
            self.loss2_fc2 = L.Linear(None, 1000) 
Example 18
def __init__(self, in_channels, out_channels, ksize=None,
                 stride=1, pad=0, dilate=1, nobias=False, initialW=None,
                 initial_bias=None, activ=relu):
        if ksize is None:
            out_channels, ksize, in_channels = in_channels, out_channels, None

        self.activ = activ
        super(Conv2DActiv, self).__init__()
        with self.init_scope():
            if dilate > 1:
                self.conv = DilatedConvolution2D(
                    in_channels, out_channels, ksize, stride, pad, dilate,
                    nobias, initialW, initial_bias)
            else:
                self.conv = Convolution2D(
                    in_channels, out_channels, ksize, stride, pad,
                    nobias, initialW, initial_bias) 
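
The ksize-is-None branch above lets the layer be constructed with in_channels omitted. A short sketch of the two equivalent spellings, assuming the snippet comes from chainercv's Conv2DActiv (as the class name suggests) and that chainercv is installed:

import numpy as np
from chainercv.links import Conv2DActiv

# Both layers are 3x3 convolutions followed by ReLU; the second form omits
# in_channels, which triggers the argument shuffle in the constructor above.
l1 = Conv2DActiv(3, 16, 3, pad=1)   # in_channels=3, out_channels=16, ksize=3
l2 = Conv2DActiv(16, 3, pad=1)      # in_channels inferred from the first input

x = np.zeros((1, 3, 32, 32), dtype=np.float32)
print(l1(x).shape, l2(x).shape)     # both (1, 16, 32, 32)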
Example 19
def __init__(self, base, n_base_output, scales):
        super(FPN, self).__init__()
        with self.init_scope():
            self.base = base
            self.inner = chainer.ChainList()
            self.outer = chainer.ChainList()

        init = {'initialW': initializers.GlorotNormal()}
        for _ in range(n_base_output):
            self.inner.append(L.Convolution2D(256, 1, **init))
            self.outer.append(L.Convolution2D(256, 3, pad=1, **init))

        self.scales = scales
        # hacks
        self.n_base_output = n_base_output
        self.n_base_output_minus1 = n_base_output - 1
        self.scales_minus_n_base_output = len(scales) - n_base_output 
Example 20
def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
        super(AttLoc, self).__init__()
        with self.init_scope():
            self.mlp_enc = L.Linear(eprojs, att_dim)
            self.mlp_dec = L.Linear(dunits, att_dim, nobias=True)
            self.mlp_att = L.Linear(aconv_chans, att_dim, nobias=True)
            self.loc_conv = L.Convolution2D(1, aconv_chans, ksize=(
                1, 2 * aconv_filts + 1), pad=(0, aconv_filts))
            self.gvec = L.Linear(att_dim, 1)

        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.aconv_chans = aconv_chans 
Example 21
def __init__(self):
        chainer.Chain.__init__(self)
        self.dtype = np.float16
        W = initializers.HeNormal(1 / np.sqrt(2), self.dtype)
        bias = initializers.Zero(self.dtype)

        with self.init_scope():
            self.conv1 = L.Convolution2D(None, 96, 11, stride=4,
                                         initialW=W, initial_bias=bias)
            self.conv2 = L.Convolution2D(None, 256, 5, pad=2,
                                         initialW=W, initial_bias=bias)
            self.conv3 = L.Convolution2D(None, 384, 3, pad=1,
                                         initialW=W, initial_bias=bias)
            self.conv4 = L.Convolution2D(None, 384, 3, pad=1,
                                         initialW=W, initial_bias=bias)
            self.conv5 = L.Convolution2D(None, 256, 3, pad=1,
                                         initialW=W, initial_bias=bias)
            self.fc6 = L.Linear(None, 4096, initialW=W, initial_bias=bias)
            self.fc7 = L.Linear(None, 4096, initialW=W, initial_bias=bias)
            self.fc8 = L.Linear(None, 1000, initialW=W, initial_bias=bias) 
Example 22
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
        out_units = n_units // 3
        super(CNNEncoder, self).__init__(
            embed=L.EmbedID(n_vocab, n_units, ignore_label=-1,
                            initialW=embed_init),
            cnn_w3=L.Convolution2D(
                n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
                nobias=True),
            cnn_w4=L.Convolution2D(
                n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
                nobias=True),
            cnn_w5=L.Convolution2D(
                n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
                nobias=True),
            mlp=MLP(n_layers, out_units * 3, dropout)
        )
        self.out_units = out_units * 3
        self.dropout = dropout
        self.use_predict_embed = False 
Example 23
def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 num_blocks):
        super(PolyConv, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=True)
            for i in range(num_blocks):
                setattr(self, "bn{}".format(i + 1), L.BatchNormalization(
                    size=out_channels,
                    eps=1e-5))
            self.activ = F.relu 
Example 24
def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate,
                 activate):
        super(DRNConv, self).__init__()
        self.activate = activate

        with self.init_scope():
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=True,
                dilate=dilate)
            self.bn = L.BatchNormalization(
                size=out_channels,
                eps=1e-5)
            if self.activate:
                self.activ = F.relu 
Example 25
def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride=1,
                 pad=0):
        super(NINConv, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=False)
            self.activ = F.relu 
Example 26
def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 groups):
        super(CondenseSimpleConv, self).__init__()
        with self.init_scope():
            self.bn = L.BatchNormalization(size=in_channels)
            self.activ = F.relu
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=True,
                groups=groups)