Python source examples: keras.engine.Model()

Example 1
def build_discriminator(config: BEGANConfig, autoencoder: Container):
    """
    
    Keras Model class is able to have several inputs/outputs. 
    But, loss functions should be defined each other, and the loss function cannot reference other inputs/outputs.
    For computing loss, two inputs/outputs are concatenated.
    """
    # IN Shape: [ImageHeight, ImageWidth, (real data(3 channels) + generated data(3 channels))]
    in_out_shape = (config.image_height, config.image_width, 3 * 2)
    all_input = Input(in_out_shape)

    # Split Input Data
    data_input = Lambda(lambda x: x[:, :, :, 0:3], output_shape=(config.image_height, config.image_width, 3))(all_input)
    generator_input = Lambda(lambda x: x[:, :, :, 3:6], output_shape=(config.image_height, config.image_width, 3))(all_input)

    # use same autoencoder(weights are shared)
    data_output = autoencoder(data_input)  # (bs, row, col, ch)
    generator_output = autoencoder(generator_input)

    # concatenate output to be same shape of input
    all_output = Concatenate(axis=-1)([data_output, generator_output])

    discriminator = DiscriminatorModel(all_input, all_output, name="discriminator")
    return discriminator 
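
Because the two halves of all_output are the autoencoder reconstructions of the real and generated images, a single loss function can slice the concatenated tensors and compare each half with its input. A minimal sketch of such a BEGAN-style loss (hypothetical; the source's DiscriminatorModel loss is not shown here, and k_t is a plain constant although BEGAN updates it during training):

from keras import backend as K

def began_discriminator_loss(y_true, y_pred, k_t=0.0):
    # y_true holds the concatenated real+generated inputs, y_pred their reconstructions;
    # channels 0:3 belong to the real images, 3:6 to the generated ones.
    loss_real = K.mean(K.abs(y_true[:, :, :, 0:3] - y_pred[:, :, :, 0:3]))
    loss_generated = K.mean(K.abs(y_true[:, :, :, 3:6] - y_pred[:, :, :, 3:6]))
    # BEGAN pushes reconstruction error down on real data and up on generated data.
    return loss_real - k_t * loss_generated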
Example 2
def __init__(self, inputs, outputs, greedy=True, beam_width=100, top_paths=1, charset=None):
        """
        Initialization of a CTC Model. 
        :param inputs: Input layer of the neural network
            outputs: Last layer of the neural network before CTC (e.g. a TimeDistributed Dense)
            greedy, beam_width, top_paths: Parameters of the CTC decoding (see ctc decoding tensorflow for more details)
            charset: labels related to the input of the CTC approach
        """
        self.model_train = None
        self.model_pred = None
        self.model_eval = None
        if not isinstance(inputs, list):
            self.inputs = [inputs]
        else:
            self.inputs = inputs
        if not isinstance(outputs, list):
            self.outputs = [outputs]
        else:
            self.outputs = outputs

        self.greedy = greedy
        self.beam_width = beam_width
        self.top_paths = top_paths
        self.charset = charset 
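
A hedged usage sketch, assuming the class defined above is named CTCModel; the layer sizes and alphabet size are illustrative, not taken from the source:

from keras.layers import Input, LSTM, TimeDistributed, Dense

nb_labels = 27                                   # hypothetical alphabet size
inputs = Input(shape=(None, 26))                 # (time, features), variable-length sequences
x = LSTM(64, return_sequences=True)(inputs)
outputs = TimeDistributed(Dense(nb_labels + 1, activation='softmax'))(x)  # +1 for the CTC blank

ctc_model = CTCModel([inputs], [outputs], greedy=False, beam_width=50)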
Example 3
def get_probas_on_batch(self, inputs, verbose=False):
        """
        Get the probabilities of each label at each time of an observation sequence (matrix T x D)
        This is the output of the softmax function after the recurrent layers (the input of the CTC computations)

        Computation is done for a batch. This function does not exist in a Keras Model.

        :return: A set of probabilities for each sequence and each time frame, one probability per label + the blank
            (this is the output of the TimeDistributed Dense layer, the blank label is the last probability)
        """

        x = inputs[0]
        x_len = inputs[2]
        batch_size = x.shape[0]

        #  Find the output of the softmax function
        probs = self.model_init.predict(x, batch_size=batch_size, verbose=verbose)

        # Select the outputs that do not refer to padding
        probs_epoch = [np.asarray(probs[data_idx, :x_len[data_idx][0], :]) for data_idx in range(batch_size)]

        return probs_epoch 
Example 4
def get_probas(self, inputs, batch_size, verbose=False):
        """
        Get the probabilities of each label at each time of an observation sequence (matrix T x D)
        This is the output of the softmax function after the recurrent layers (the input of the CTC computations)

        Computation is done for a batch. This function does not exist in a Keras Model.

        :return: A set of probabilities for each sequence and each time frame, one probability per label + the blank
            (this is the output of the TimeDistributed Dense layer, the blank label is the last probability)
        """

        x = inputs[0]
        x_len = inputs[2]

        #  Find the output of the softmax function
        probs = self.model_init.predict(x, batch_size=batch_size, verbose=verbose)

        # Select the outputs that do not refer to padding
        probs_epoch = [np.asarray(probs[data_idx, :x_len[data_idx][0], :]) for data_idx in range(batch_size)]

        return probs_epoch 
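
Both get_probas variants expect inputs packed in the CTC convention, [input_sequences, label_sequences, input_lengths, label_lengths]; only indices 0 and 2 are read here. A hypothetical call with padded data, reusing the ctc_model instance sketched after Example 2:

import numpy as np

x = np.zeros((4, 100, 26))                   # 4 padded sequences of 100 frames, 26 features
y = np.zeros((4, 30))                        # padded label sequences (unused by get_probas)
x_len = np.array([[100], [80], [90], [70]])  # true frame count of each sequence
y_len = np.array([[30], [25], [28], [20]])   # true label count (unused by get_probas)

probs = ctc_model.get_probas([x, y, x_len, y_len], batch_size=4)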
Example 5
def segmentor(self, start_filters=64, filter_inc_rate=2, out_ch=1, depth=2):
        """
        Creates recursively a segmentor model a.k.a. generator in GAN literature
        """
        inp = Input(shape=self.shape)
        first_block = convl1_lrelu(inp, start_filters, 4, 2)
        middle_blocks = level_block(first_block, int(start_filters * 2), depth=depth,
                                    filter_inc_rate=filter_inc_rate, p=0.1)
        if self.softmax:
            last_block = upsampl_softmax(middle_blocks, out_ch+1, 3, 1, 2, self.max_project)  # out_ch+1: softmax cross-entropy needs an explicit background class
        else:
            last_block = upsampl_conv(middle_blocks, out_ch, 3, 1, 2)
        if self.crop:
            out = multiply([inp, last_block])  # crop input with predicted mask
            return Model([inp], [out], name='segmentor_net')
        return Model([inp], [last_block], name='segmentor_net')
Example 6
def critic(self):
        """
        Creates a critic a.k.a. discriminator model
        """
        # Note: Future improvement is to provide definable depth of critic
        inp_cropped = Input(self.shape, name='inp_cropped_image')  # Data cropped with generated OR g.t. mask

        shared_1 = shared_convl1_lrelu(self.shape, 64, 4, 2, name='shared_1_conv_lrelu')
        shared_2 = shared_convl1_bn_lrelu((16, 16, 64), 128, 4, 2, name='shared_2_conv_bn_lrelu')
        shared_3 = shared_convl1_bn_lrelu((8, 8, 128), 256, 4, 2, name='shared_3_conv_bn_lrelu')
        shared_4 = shared_convl1_bn_lrelu((4, 4, 256), 512, 4, 2, name='shared_4_conv_bn_lrelu')

        x1_S = shared_1(inp_cropped)
        #x1_S = shared_1(multiply([inp, mask]))
        x2_S = shared_2(x1_S)
        x3_S = shared_3(x2_S)
        x4_S = shared_4(x3_S)
        features = Concatenate(name='features_S')(
            [Flatten()(inp_cropped), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
            #[Flatten()(inp), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
        )
        return Model(inp_cropped, features, name='critic_net')
        #return Model([inp, mask], features, name='critic_net') 
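
The shared_convl1_* helpers are not shown in this snippet; weight sharing works because each helper returns a reusable Model that can then be called on several tensors. A plausible minimal definition of shared_convl1_lrelu (an assumption, not the source definition):

from keras.layers import Input, Conv2D, LeakyReLU
from keras.models import Model

def shared_convl1_lrelu(in_shape, filters, kernel_size, strides, name=None):
    # Wrapping Conv2D + LeakyReLU in a Model lets the same weights be applied
    # to multiple inputs (e.g. data cropped with generated and ground-truth masks).
    inp = Input(shape=in_shape)
    x = Conv2D(filters, kernel_size, strides=strides, padding='same')(inp)
    x = LeakyReLU(0.2)(x)
    return Model(inp, x, name=name)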
Example 7
def define(self, n_output: int=2, dropout: float=1., base_model=None):
        """
        Define model architecture for eyes_fcscratch
        :param n_output: number of network outputs
        :param dropout: dropout value
        :param base_model: Base model whose architecture and weights are used for convolutional blocks.
        """

        hidden_dim = 1536
        image_input = Input(shape=base_model.input_size[input_type.EYES], name='input')

        # Load base model without FC layers
        base = base_model.load_model(input_tensor=image_input, include_top=False)

        weight_init = glorot_uniform(seed=3)

        # Define architecture on top of base model
        last_layer = base.get_layer('pool5').output
        x = Flatten(name='flatten')(last_layer)
        x = Dense(hidden_dim, activation='relu', kernel_initializer=weight_init, name='fc6')(x)
        if dropout < 1.:
            x = Dropout(dropout, seed=0, name='dp6')(x)
        out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)

        # The first four layers are not trained
        for layer in base.layers[:4]:
            layer.trainable = False

        self.model = Model([image_input], out)

        print(len(self.model.layers))
        print([n.name for n in self.model.layers])

        # Print model summary
        self.model.summary() 
Example 8
def define(self, n_output: int=2, dropout: float=1., base_model=None):
        """
        Define model architecture for face_finetune
        :param n_output: number of network outputs
        :param dropout: dropout value
        :param base_model: Base model whose architecture and weights are used for all network except last FC layer.
        """

        image_input = Input(shape=base_model.input_size[input_type.FACE], name='input')
        weight_init = glorot_uniform(seed=3)

        # Load model with FC layers
        base = base_model.load_model(input_tensor=image_input, include_top=True)

        last_layer = base.get_layer('fc6/relu').output
        fc7 = base.get_layer('fc7')
        fc7r = base.get_layer('fc7/relu')
        x = last_layer

        if dropout < 1.:
            x = Dropout(dropout, seed=0, name='dp6')(x)
        x = fc7(x)
        x = fc7r(x)
        if dropout < 1.:
            x = Dropout(dropout, seed=1, name='dp7')(x)
        out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)

        # Freeze first conv layers
        for layer in base.layers[:4]:
            layer.trainable = False

        self.model = Model(image_input, out)

        # Print model summary
        self.model.summary() 
Example 9
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained="imagenet"): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model
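
A hypothetical call; ResNet50Gray is a made-up stand-in for whatever base-model constructor accepts the custom channels="gray" keyword used above:

model = get_model(ResNet50Gray,      # hypothetical constructor
                  layer=10,          # freeze the first 10 layers
                  lr=1e-4,
                  input_shape=(224, 224, 1),
                  classes=2,
                  dropout=0.5,
                  pooling="avg")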

Example 10
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained=None): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

Example 11
def build_generator(config: BEGANConfig):
    decoder = build_decoder(config, name="generator_decoder")
    generator = Model(decoder.inputs, decoder.outputs, name="generator")
    return generator 
Example 12
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                           depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model 
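
create_convolution_block is not shown; a plausible minimal definition, assuming channels-first data to match the axis=1 concatenation above:

from keras.layers import Conv3D, BatchNormalization, Activation

def create_convolution_block(input_layer, n_filters, kernel=(3, 3, 3),
                             padding='same', batch_normalization=False):
    # Conv3D, optional BatchNorm on the channel axis (axis=1, channels-first),
    # then a ReLU, matching how the block is called in the network above.
    layer = Conv3D(n_filters, kernel, padding=padding)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1)(layer)
    return Activation('relu')(layer)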
Example 13
def ab_ag_seq_model(max_ag_len, max_cdr_len):
    input_ag = Input(shape=(max_ag_len, NUM_FEATURES))
    ag_seq = Masking()(input_ag)

    enc_ag = Bidirectional(LSTM(128, dropout=0.1, recurrent_dropout=0.1),
                           merge_mode='concat')(ag_seq)

    input_ab = Input(shape=(max_cdr_len, NUM_FEATURES))
    label_mask = Input(shape=(max_cdr_len,))

    seq = Masking()(input_ab)

    loc_fts = MaskedConvolution1D(64, 5, padding='same', activation='elu')(seq)

    glb_fts = Bidirectional(LSTM(256, dropout=0.15, recurrent_dropout=0.2,
                                 return_sequences=True),
                            merge_mode='concat')(loc_fts)

    enc_ag_rep = RepeatVector(max_cdr_len)(enc_ag)
    ab_ag_repr = concatenate([glb_fts, enc_ag_rep])
    ab_ag_repr = MaskingByLambda(mask_by_input(label_mask))(ab_ag_repr)
    ab_ag_repr = Dropout(0.3)(ab_ag_repr)

    aa_probs = TimeDistributed(Dense(1, activation='sigmoid'))(ab_ag_repr)
    model = Model(inputs=[input_ag, input_ab, label_mask], outputs=aa_probs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy', false_pos, false_neg],
                  sample_weight_mode="temporal")
    return model 
Example 14
def ab_seq_model(max_cdr_len):
    input_ab, label_mask, _, probs = base_ab_seq_model(max_cdr_len)
    model = Model(inputs=[input_ab, label_mask], outputs=probs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy', false_pos, false_neg],
                  sample_weight_mode="temporal")
    return model 
Example 15
def conv_output_ab_seq_model(max_cdr_len):
    input_ab, label_mask, loc_fts, probs = base_ab_seq_model(max_cdr_len)
    model = Model(inputs=[input_ab, label_mask], outputs=[probs, loc_fts])
    return model 
Example 16
def loss_net(self) -> Model:
        """Returns the network that yields a loss given both input spectrograms and labels. Used for training."""
        input_batch = self._input_batch_input
        label_batch = Input(name=Wav2Letter.InputNames.label_batch, shape=(None,), dtype='int32')
        label_lengths = Input(name=Wav2Letter.InputNames.label_lengths, shape=(1,), dtype='int64')

        asg_transition_probabilities_variable = backend.variable(value=self.asg_transition_probabilities,
                                                                 name="asg_transition_probabilities")
        asg_initial_probabilities_variable = backend.variable(value=self.asg_initial_probabilities,
                                                              name="asg_initial_probabilities")
        # Since Keras doesn't currently support loss functions with extra parameters,
        # we define a custom Lambda layer that yields a single real-valued loss (ASG or CTC) given the grapheme probabilities:
        loss_layer = Lambda(Wav2Letter._asg_lambda if self.use_asg else Wav2Letter._ctc_lambda,
                            name='asg_loss' if self.use_asg else 'ctc_loss',
                            output_shape=(1,),
                            arguments={"transition_probabilities": asg_transition_probabilities_variable,
                                       "initial_probabilities": asg_initial_probabilities_variable} if self.use_asg else None)

        # This loss layer is placed atop the predictive network and provided with additional arguments,
        # namely the label batch and prediction/label sequence lengths:
        loss = loss_layer(
            [self.predictive_net(input_batch), label_batch, self._prediction_lengths_input, label_lengths])

        loss_net = Model(inputs=[input_batch, label_batch, self._prediction_lengths_input, label_lengths],
                         outputs=[loss])
        # Since the loss is already calculated in the last layer of the net, we just pass through the results here.
        # Dummy labels have to be given to satisfy the Keras API.
        loss_net.compile(loss=lambda dummy_labels, ctc_loss: ctc_loss, optimizer=self.optimizer)
        return loss_net 
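
Wav2Letter._ctc_lambda itself is not shown; for the CTC case it is typically a thin wrapper around keras.backend.ctc_batch_cost. A sketch under that assumption:

from keras import backend

def _ctc_lambda(args):
    # Unpack the four tensors wired up in loss_net and delegate to the built-in
    # batched CTC loss; both length tensors have shape (batch, 1) as above.
    predictions, labels, prediction_lengths, label_lengths = args
    return backend.ctc_batch_cost(labels, predictions, prediction_lengths, label_lengths)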
Example 17
def decoding_net(self):
        decoding_layer = Lambda(self._decode_lambda, name='ctc_decode')

        prediction_batch = self.predictive_net(self._input_batch_input)
        decoded = decoding_layer([prediction_batch, self._prediction_lengths_input])

        return Model(inputs=[self._input_batch_input, self._prediction_lengths_input], outputs=[decoded]) 
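
Likewise, _decode_lambda is not shown; a sketch using the backend's CTC decoder, assuming greedy decoding and per-sample lengths of shape (batch, 1):

from keras import backend

def _decode_lambda(args):
    # ctc_decode expects softmax predictions plus a flat int32 length vector;
    # it returns a list of decoded label tensors and their log-probabilities.
    predictions, prediction_lengths = args
    decoded, _ = backend.ctc_decode(predictions,
                                    backend.cast(backend.flatten(prediction_lengths), 'int32'),
                                    greedy=True)
    return decoded[0]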
Example 18
def get_model_train(self):
        """
        :return: Model used for training using the CTC approach
        """
        return self.model_train 
Example 19
def get_model_pred(self):
        """
        :return: Model used for testing using the CTC approach
        """
        return self.model_pred 
Example 20
def get_model_eval(self):
        """
        :return: Model used for evaluation using the CTC approach
        """
        return self.model_eval 
Example 21
def fit_generator(self, generator,
                      steps_per_epoch,
                      epochs=1,
                      verbose=1,
                      callbacks=None,
                      validation_data=None,
                      validation_steps=None,
                      class_weight=None,
                      max_q_size=10,
                      workers=1,
                      pickle_safe=False,
                      initial_epoch=0):
        """
        Model training on data yielded batch-by-batch by a Python generator.

        The generator is run in parallel to the model, for efficiency. 
        For instance, this allows you to do real-time data augmentation on images on CPU in parallel to training your model on GPU.

        A major modification is that the generator must provide x data of the form:
          [input_sequences, label_sequences, inputs_lengths, labels_length]
        (similar to how CTC is used in TensorFlow; a sketch of such a generator follows this example)

        :param: See keras.engine.Model.fit_generator()
        :return: A History object
        """
        out = self.model_train.fit_generator(generator, steps_per_epoch, epochs=epochs, verbose=verbose,
                                             callbacks=callbacks, validation_data=validation_data,
                                             validation_steps=validation_steps, class_weight=class_weight,
                                             max_q_size=max_q_size, workers=workers, pickle_safe=pickle_safe,
                                             initial_epoch=initial_epoch)

        self.model_pred.set_weights(self.model_train.get_weights())  # required??
        self.model_eval.set_weights(self.model_train.get_weights())
        return out 
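
A hedged sketch of a generator in the required form; the shapes and label range are illustrative, and y is a dummy because the CTC loss is computed inside the model itself:

import numpy as np

def ctc_data_generator(batch_size=4, max_time=100, nb_features=26, max_label_len=30):
    while True:
        x = np.random.rand(batch_size, max_time, nb_features)
        labels = np.random.randint(0, 27, size=(batch_size, max_label_len))
        input_lengths = np.full((batch_size, 1), max_time)
        label_lengths = np.full((batch_size, 1), max_label_len)
        # x has the form [input_sequences, label_sequences, inputs_lengths, labels_length],
        # followed by dummy targets to satisfy the Keras API.
        yield [x, labels, input_lengths, label_lengths], np.zeros((batch_size,))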
Example 22
def fit(self, x=None,
            y=None,
            batch_size=None,
            epochs=1,
            verbose=1,
            callbacks=None,
            validation_split=0.0,
            validation_data=None,
            shuffle=True,
            class_weight=None,
            sample_weight=None,
            initial_epoch=0,
            steps_per_epoch=None,
            validation_steps=None):
        """
        Model training on data.

        A major modification is that the x input must have the form:
          [input_sequences, label_sequences, inputs_lengths, labels_length]
        (similar to how CTC is used in TensorFlow)

        :param: See keras.engine.Model.fit()
        :return: A History object
        """

        out = self.model_train.fit(x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=verbose,
                                   callbacks=callbacks, validation_split=validation_split,
                                   validation_data=validation_data,
                                   shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight,
                                   initial_epoch=initial_epoch,
                                   steps_per_epoch=steps_per_epoch, validation_steps=validation_steps)

        self.model_pred.set_weights(self.model_train.get_weights())
        self.model_eval.set_weights(self.model_train.get_weights())

        return out 
Example 23
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
        """ Runs a single gradient update on a single batch of data.
        See Keras.Model for more details.


        """

        out = self.model_train.train_on_batch(x, y, sample_weight=sample_weight,
                                              class_weight=class_weight)

        self.model_pred.set_weights(self.model_train.get_weights())
        self.model_eval.set_weights(self.model_train.get_weights())

        return out 
Example 24
def test_on_batch(self, x=None, metrics=['loss', 'ler', 'ser']):
        """ Name of a Keras Model function: this relates to evaluate on batch """
        return self.evaluate_on_batch(x) 
Example 25
def cnn_multi_filters(wv, sent_length, nfilters, nb_filters, **kwargs):
    noise = kwargs.get("noise", 0)
    trainable = kwargs.get("trainable", False)
    drop_text_input = kwargs.get("drop_text_input", 0.)
    drop_conv = kwargs.get("drop_conv", 0.)
    activity_l2 = kwargs.get("activity_l2", 0.)

    input_text = Input(shape=(sent_length,), dtype='int32')

    emb_text = embeddings_layer(max_length=sent_length, embeddings=wv,
                                trainable=trainable, masking=False)(input_text)
    emb_text = GaussianNoise(noise)(emb_text)
    emb_text = Dropout(drop_text_input)(emb_text)

    pooling_reps = []
    for i in nfilters:
        feat_maps = Convolution1D(filters=nb_filters,
                                  kernel_size=i,
                                  padding="valid",
                                  activation="relu",
                                  strides=1)(emb_text)
        pool_vecs = MaxPooling1D(pool_size=2)(feat_maps)
        pool_vecs = Flatten()(pool_vecs)
        # pool_vecs = GlobalMaxPooling1D()(feat_maps)
        pooling_reps.append(pool_vecs)

    representation = concatenate(pooling_reps)

    representation = Dropout(drop_conv)(representation)

    probabilities = Dense(3, activation='softmax',
                          activity_regularizer=l2(activity_l2))(representation)

    model = Model(inputs=input_text, outputs=probabilities)
    model.compile(optimizer="adam", loss='categorical_crossentropy')

    return model 
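
embeddings_layer is not defined in this snippet; presumably it wraps a pretrained embedding matrix in a Keras Embedding layer. A sketch under that assumption:

from keras.layers import Embedding

def embeddings_layer(max_length, embeddings, trainable=False, masking=False):
    # embeddings: a (vocab_size, emb_dim) matrix of pretrained word vectors.
    vocab_size, emb_dim = embeddings.shape
    return Embedding(input_dim=vocab_size, output_dim=emb_dim,
                     input_length=max_length, weights=[embeddings],
                     trainable=trainable, mask_zero=masking)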
Example 26
def crop(model, layer_or_tensor):
    if hasattr(layer_or_tensor, '_keras_history'):
        ins, outs = crop_to_tensor(model, layer_or_tensor)
    else:
        ins, outs = crop_to_layer(model, layer_or_tensor)
    return Model(ins, outs, preloaded_data=model.preloaded_data) 
Example 27
def test1():
    seq_size = 10
    batch_size = 10 
    rnn_size = 1
    xin = Input(batch_shape=(batch_size, seq_size,1))
    xtop = Input(batch_shape=(batch_size, seq_size))
    xbranch, xsummary = RTTN(rnn_size, return_sequences=True)([xin, xtop])

    model = Model(inputs=[xin, xtop], outputs=[xbranch, xsummary])
    model.compile(loss='MSE', optimizer='SGD')
    data_gen = generate_data_batch(batch_size, seq_size)
    model.fit_generator(generator=data_gen, steps_per_epoch=100, epochs=100)  # 100 steps of batch 10 = 1000 samples per epoch
Example 28
def show_masks(self, out_layer=-2):
        return Model(self.netS.input, self.netS.layers[out_layer].output)