Python source code examples: config.cfg.batch_size
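
All of the snippets below read their hyperparameters from a project-wide cfg object imported from each project's config module. For orientation, here is a minimal, purely hypothetical sketch of such a module; the attribute names shown (num_gpus, num_thread, pixel_mean, pixel_std) are assumptions inferred from the examples, and every project defines its own fields and values:

# config.py -- hypothetical minimal sketch, not taken from any of the projects below
class Config:
    batch_size = 32                       # read everywhere below as cfg.batch_size
    num_gpus = 1                          # some examples scale the DataLoader batch by this
    num_thread = 8                        # DataLoader worker processes
    pixel_mean = (0.485, 0.456, 0.406)    # input normalization (ImageNet statistics)
    pixel_std = (0.229, 0.224, 0.225)

cfg = Config()

A training script then does "from config import cfg" and, as in several of the _make_batch_generator examples below, treats cfg.num_gpus * cfg.batch_size as the effective batch size when a single DataLoader feeds all GPUs.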

Example 1
def _make_batch_generator(self):
        # data load and construct batch generator
        self.logger.info("Creating dataset...")
        trainset_loader = []
        for i in range(len(cfg.trainset)):
            if i > 0:
                ref_joints_name = trainset_loader[0].joints_name
            else:
                ref_joints_name = None
            trainset_loader.append(DatasetLoader(eval(cfg.trainset[i])("train"), ref_joints_name, True,
                                                 transforms.Compose([transforms.ToTensor(),
                                                                     transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)])))
        self.joint_num = trainset_loader[0].joint_num

        trainset_loader = MultipleDatasets(trainset_loader)
        self.itr_per_epoch = math.ceil(len(trainset_loader) / cfg.num_gpus / cfg.batch_size)
        self.batch_generator = DataLoader(dataset=trainset_loader, batch_size=cfg.num_gpus*cfg.batch_size, shuffle=True, num_workers=cfg.num_thread, pin_memory=True) 
Example 2
def get_generators():
    train_generator = TrainGenerator(base_dir=cfg.base_dir, 
                                     annotation_file=os.path.join(cfg.base_dir, 'annotation_train.txt'),
                                     batch_size=cfg.batch_size,
                                     img_size=(cfg.width, cfg.height),
                                     nb_channels=cfg.nb_channels,
                                     timesteps=cfg.timesteps,
                                     label_len=cfg.label_len,
                                     characters=cfg.characters)
    val_generator = ValGenerator(base_dir=cfg.base_dir,
                                 annotation_file=os.path.join(cfg.base_dir, 'annotation_val.txt'),
                                 batch_size=5000,
                                 img_size=(cfg.width, cfg.height),
                                 nb_channels=cfg.nb_channels,
                                 timesteps=cfg.timesteps,
                                 label_len=cfg.label_len,
                                 characters=cfg.characters)
    return train_generator, val_generator 
Example 3
def make_data(self):
        from COCOAllJoints import COCOJoints
        from dataset import Preprocessing

        d = COCOJoints()
        train_data, _ = d.load_data(cfg.min_kps)

        from tfflat.data_provider import DataFromList, MultiProcessMapDataZMQ, BatchData, MapData
        dp = DataFromList(train_data)
        if cfg.dpflow_enable:
            dp = MultiProcessMapDataZMQ(dp, cfg.nr_dpflows, Preprocessing)
        else:
            dp = MapData(dp, Preprocessing)
        dp = BatchData(dp, cfg.batch_size // cfg.nr_aug)
        dp.reset_state()
        dataiter = dp.get_data()

        return dataiter 
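
The make_data snippets here and in the PoseTrack example near the end batch cfg.batch_size // cfg.nr_aug elements, presumably because the Preprocessing step turns each raw sample into cfg.nr_aug augmented crops, so the network still sees cfg.batch_size inputs per step. A minimal arithmetic sketch (the numbers are illustrative, not taken from any of the projects):

# illustrative values only
batch_size = 32                        # cfg.batch_size: inputs the network sees per step
nr_aug = 4                             # cfg.nr_aug: augmented copies produced per raw sample
raw_per_batch = batch_size // nr_aug   # 8 raw samples are drawn per step
assert raw_per_batch * nr_aug == batch_size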
Example 4
def loss_ohem(preds, labels):
    labels = tf.cast(labels, tf.int64)
    labels = tf.reshape(labels, (cfg.batch_size,))
    print('pre labels', labels.get_shape())
    labels = tf.one_hot(labels, cfg.classes)
    print('labels', labels.get_shape())
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=preds, labels=labels)
    print('cross_entropy', cross_entropy.get_shape())
    # online hard example mining: keep only the keep_num highest-loss samples in the batch
    keep_num = tf.cast(cfg.batch_size * cfg.train.ohem_ratio, tf.int32)
    cross_entropy = tf.reshape(cross_entropy, (cfg.batch_size,))
    print('cross_entropy', cross_entropy.get_shape())
    _, k_index = tf.nn.top_k(cross_entropy, keep_num)
    loss = tf.gather(cross_entropy, k_index)
    print('ohem loss', loss.get_shape())

    return tf.reduce_mean(loss) 
Example 5
def kernel_tile(input, kernel, stride):
    # output = tf.extract_image_patches(input, ksizes=[1, kernel, kernel, 1], strides=[1, stride, stride, 1], rates=[1, 1, 1, 1], padding='VALID')

    input_shape = input.get_shape()
    tile_filter = np.zeros(shape=[kernel, kernel, input_shape[3],
                                  kernel * kernel], dtype=np.float32)
    for i in range(kernel):
        for j in range(kernel):
            tile_filter[i, j, :, i * kernel + j] = 1.0

    tile_filter_op = tf.constant(tile_filter, dtype=tf.float32)
    output = tf.nn.depthwise_conv2d(input, tile_filter_op, strides=[
                                    1, stride, stride, 1], padding='VALID')
    output_shape = output.get_shape()
    output = tf.reshape(output, shape=[int(output_shape[0]), int(
        output_shape[1]), int(output_shape[2]), int(input_shape[3]), kernel * kernel])
    output = tf.transpose(output, perm=[0, 1, 2, 4, 3])

    return output

# input should be a tensor with shape [batch_size, caps_num_i, 16] 
Example 6
def mat_transform(input, caps_num_c, regularizer, tag=False):
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])
    # the output of capsule is miu, the mean of a Gaussian, and activation, the sum of probabilities
    # it has no relationship with the absolute values of w and votes
    # using weights with bigger stddev helps numerical stability
    w = slim.variable('w', shape=[1, caps_num_i, caps_num_c, 4, 4], dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0),
                      regularizer=regularizer)

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    votes = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_i, caps_num_c, 16])

    return votes 
Example 7
def vec_transform(input, caps_num_out, channel_num_out):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[1])
    channel_num_in = int(input.get_shape()[-1])

    w = slim.variable('w', shape=[1, caps_num_out, caps_num_in, channel_num_in, channel_num_out], dtype=tf.float32,
                      initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.reshape(input, shape=[batch_size, 1, caps_num_in, 1, channel_num_in])
    output = tf.tile(output, [1, caps_num_out, 1, 1, 1])

    output = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_out, caps_num_in, channel_num_out])

    return output

# input should be a tensor with shape [batch_size, caps_num_out, channel_num] 
Example 8
def dynamic_routing(input):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[2])
    caps_num_out = int(input.get_shape()[1])

    input_stopped = tf.stop_gradient(input, name='stop_gradient')

    b = tf.constant(np.zeros([batch_size, caps_num_out, caps_num_in, 1], dtype=np.float32))

    for r_iter in range(cfg.iter_routing):
        c = tf.nn.softmax(b, dim=1)
        if r_iter == cfg.iter_routing-1:
            s = tf.matmul(input, c, transpose_a=True)
            v = squash(tf.squeeze(s))
        else:
            s = tf.matmul(input_stopped, c, transpose_a=True)
            v = squash(tf.squeeze(s))
            b += tf.reduce_sum(tf.reshape(v, shape=[batch_size, caps_num_out, 1, -1])*input_stopped, axis=-1, keep_dims=True)

    return v 
Example 9
def evaluation(model, supervisor, num_label):
    teX, teY, num_te_batch = load_data(cfg.dataset, cfg.batch_size, is_training=False)
    fd_test_acc = save_to()
    with supervisor.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        supervisor.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
        tf.logging.info('Model restored!')

        test_acc = 0
        for i in tqdm(range(num_te_batch), total=num_te_batch, ncols=70, leave=False, unit='b'):
            start = i * cfg.batch_size
            end = start + cfg.batch_size
            acc = sess.run(model.accuracy, {model.X: teX[start:end], model.labels: teY[start:end]})
            test_acc += acc
        test_acc = test_acc / (cfg.batch_size * num_te_batch)
        fd_test_acc.write(str(test_acc))
        fd_test_acc.close()
        print('Test accuracy has been saved to ' + cfg.results + '/test_acc.csv') 
Example 10
def loss(v_len, output, x, y):
    max_l = tf.square(tf.maximum(0., cfg.m_plus-v_len))
    max_r = tf.square(tf.maximum(0., v_len - cfg.m_minus))

    l_c = y*max_l+cfg.lambda_val * (1 - y) * max_r

    margin_loss = tf.reduce_mean(tf.reduce_sum(l_c, axis=1))

    origin = tf.reshape(x, shape=[cfg.batch_size, -1])
    reconstruction_err = tf.reduce_mean(tf.square(output-origin))

    total_loss = margin_loss+0.0005*reconstruction_err

    tf.losses.add_loss(total_loss)

    return total_loss 
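
Written out, the snippet above is the CapsNet margin loss: with T_c the one-hot target y, ||v_c|| the capsule length v_len, m^+ = cfg.m_plus, m^- = cfg.m_minus and lambda = cfg.lambda_val,

    L_c = T_c \,\max(0,\; m^{+} - \lVert v_c \rVert)^{2} + \lambda\,(1 - T_c)\,\max(0,\; \lVert v_c \rVert - m^{-})^{2}

    \text{margin\_loss} = \frac{1}{\text{cfg.batch\_size}} \sum_{\text{batch}} \sum_{c} L_c, \qquad
    \text{total\_loss} = \text{margin\_loss} + 0.0005 \cdot \text{reconstruction\_err}

where reconstruction_err is the mean squared error between the decoder output and the flattened input. The class-based loss method further below computes the same quantities.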
Example 11
def evaluate(model, data_loader):
    # Setting up model
    test_iterator = data_loader(cfg.batch_size, mode="test")
    inputs = data_loader.next_element["images"]
    labels = data_loader.next_element["labels"]
    model.create_network(inputs, labels)

    # Create files to save evaluating results
    fd = save_to(is_training=False)
    saver = tf.train.Saver()

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        test_handle = sess.run(test_iterator.string_handle())
        saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
        tf.logging.info('Model restored!')

        probs = []
        targets = []
        total_acc = 0
        n = 0
        while True:
            try:
                test_acc, prob, label = sess.run([model.accuracy, model.probs, labels], feed_dict={data_loader.handle: test_handle})
                probs.append(prob)
                targets.append(label)
                total_acc += test_acc
                n += 1
            except tf.errors.OutOfRangeError:
                break
        probs = np.concatenate(probs, axis=0)
        targets = np.concatenate(targets, axis=0).reshape((-1, 1))
        avg_acc = total_acc / n
        out_path = os.path.join(cfg.results_dir, 'prob_test.txt')
        np.savetxt(out_path, np.hstack((probs, targets)), fmt='%1.2f')
        print('Classification probability for each category has been saved to ' + out_path)
        fd["test_acc"].write(str(avg_acc))
        fd["test_acc"].close()
        out_path = os.path.join(cfg.results_dir, 'test_accuracy.txt')
        print('Test accuracy has been saved to ' + out_path) 
Example 12
def _make_batch_generator(self):
        # data load and construct batch generator
        self.logger.info("Creating dataset...")
        testset = eval(cfg.testset)("test")
        testset_loader = DatasetLoader(testset, None, False,
                                       transforms.Compose([transforms.ToTensor(),
                                                           transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)]))
        batch_generator = DataLoader(dataset=testset_loader, batch_size=cfg.num_gpus*cfg.test_batch_size, shuffle=False, num_workers=cfg.num_thread, pin_memory=True)
        
        self.testset = testset
        self.joint_num = testset_loader.joint_num
        self.skeleton = testset_loader.skeleton
        self.flip_pairs = testset.flip_pairs
        self.batch_generator = batch_generator 
Example 13
def squash(vector):
    '''Squashing function.
    Args:
        vector: A 4-D tensor with shape [batch_size, num_caps, vec_len, 1],
    Returns:
        A 4-D tensor with the same shape as vector but
        squashed in 3rd and 4th dimensions.
    '''
    # squash along the vec_len axis so each capsule vector is scaled independently, as the docstring describes
    vec_squared_norm = tf.reduce_sum(tf.square(vector), -2, keep_dims=True)
    scalar_factor = vec_squared_norm / (1 + vec_squared_norm)
    vec_squashed = scalar_factor * tf.divide(vector, tf.sqrt(vec_squared_norm + 1e-9))  # element-wise
    return(vec_squashed) 
Example 14
def get_batch_data():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)

    data_queues = tf.train.slice_input_producer([trX, trY])
    X, Y = tf.train.shuffle_batch(data_queues, num_threads=cfg.num_threads,
                                  batch_size=cfg.batch_size,
                                  capacity=cfg.batch_size * 64,
                                  min_after_dequeue=cfg.batch_size * 32,
                                  allow_smaller_final_batch=False)

    return(X, Y) 
Example 15
def save_images(imgs, size, path):
    '''
    Args:
        imgs: [batch_size, image_height, image_width]
        size: a list with two int elements, [image_height, image_width]
        path: the path to save images
    '''
    imgs = (imgs + 1.) / 2  # inverse_transform
    return(scipy.misc.imsave(path, mergeImgs(imgs, size))) 
Example 16
def loss(self):
        # 1. The margin loss

        # [batch_size, 10, 1, 1]
        # max_l = max(0, m_plus-||v_c||)^2
        max_l = tf.square(tf.maximum(0., cfg.m_plus - self.v_length))
        # max_r = max(0, ||v_c||-m_minus)^2
        max_r = tf.square(tf.maximum(0., self.v_length - cfg.m_minus))
        assert max_l.get_shape() == [cfg.batch_size, 10, 1, 1]

        # reshape: [batch_size, 10, 1, 1] => [batch_size, 10]
        max_l = tf.reshape(max_l, shape=(cfg.batch_size, -1))
        max_r = tf.reshape(max_r, shape=(cfg.batch_size, -1))

        # calc T_c: [batch_size, 10]
        # T_c = Y, is my understanding correct? Try it.
        T_c = self.Y
        # [batch_size, 10], element-wise multiply
        L_c = T_c * max_l + cfg.lambda_val * (1 - T_c) * max_r

        self.margin_loss = tf.reduce_mean(tf.reduce_sum(L_c, axis=1))

        # 2. The reconstruction loss
        origin = tf.reshape(self.X, shape=(cfg.batch_size, -1))
        squared = tf.square(self.decoded - origin)
        self.reconstruction_err = tf.reduce_mean(squared)

        # 3. Total loss
        self.total_loss = self.margin_loss + 0.0005 * self.reconstruction_err

        # Summary
        tf.summary.scalar('margin_loss', self.margin_loss)
        tf.summary.scalar('reconstruction_loss', self.reconstruction_err)
        tf.summary.scalar('total_loss', self.total_loss)
        recon_img = tf.reshape(self.decoded, shape=(cfg.batch_size, 28, 28, 1))
        tf.summary.image('reconstruction_img', recon_img)
        self.merged_sum = tf.summary.merge_all() 
Example 17
def test(result, model, models, test_outputs):
	# ceil over a float division so a partial final batch still gets a pass
	num_te_batch = int(math.ceil(len(result["test"]) / float(model.batch_size)))
	idx = 0
	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True

	with tf.Session(config=config, graph=model.graph) as sess:
		model.saver.restore(sess, tf.train.latest_checkpoint(models))
		tf.logging.info("Model restored!")
		print("Test phase: ")

		start = time.time()
		for i in range(num_te_batch):
			teX, size_list, idx, filenames = get_batch_of_test(result, idx, model.batch_size)
			prediction = sess.run(model.prediction, feed_dict={model.X: teX})

			for j in range(len(filenames)):
				output = Image.fromarray(prediction[j] * 255.0).convert("L").resize(size_list[j], Image.NEAREST)
				output.save(test_outputs + "/" + filenames[j])
				print(test_outputs + "/" + filenames[j] + " has been saved.")

		print("Total time: %d" % (time.time() - start))
		print("All results have been saved.") 
Example 18
def main(_):
	
	# get dataset info
	result = create_image_lists(cfg.images)
	max_iters = len(result["train"]) * cfg.epoch // cfg.batch_size
	
	tf.logging.info('Loading Graph...')
	model = DFN(max_iters, batch_size=cfg.batch_size, init_lr=cfg.init_lr, power=cfg.power, momentum=cfg.momentum, stddev=cfg.stddev, regularization_scale=cfg.regularization_scale, alpha=cfg.alpha, gamma=cfg.gamma, fl_weight=cfg.fl_weight)
	tf.logging.info('Graph loaded.')
	
	if cfg.is_training:
		if not tf.gfile.Exists(cfg.logdir):
			tf.gfile.MakeDirs(cfg.logdir)
		if not tf.gfile.Exists(cfg.models):
			tf.gfile.MakeDirs(cfg.models)
		if os.path.exists(cfg.log):
			os.remove(cfg.log)

		fd = open(cfg.log, "a")
		tf.logging.info('Start training...')
		fd.write('Start training...\n')
		train(result, model, cfg.logdir, cfg.train_sum_freq, cfg.val_sum_freq, cfg.save_freq, cfg.models, fd)
		tf.logging.info('Training done.')
		fd.write('Training done.')
		fd.close()
	else:
		if not tf.gfile.Exists(cfg.test_outputs):
			tf.gfile.MakeDirs(cfg.test_outputs)

		tf.logging.info('Start testing...')
		test(result, model, cfg.models, cfg.test_outputs)
		tf.logging.info('Testing done.') 
Example 19
def submit(img_path, submit_file, epoch):
    submit_f = open(submit_file, 'w')
    is_training = False
    cfg.batch_size = 1
    ckpt_dir = cfg.ckpt_path

    configer = tf.ConfigProto()
    configer.gpu_options.per_process_gpu_memory_fraction = 0.1
    with tf.Session(config=configer) as sess:
        imgs_holder = tf.placeholder(tf.float32, shape=[1, 224, 224, 3])
        model = SenseClsNet(imgs_holder, None, is_training)
        classes, scores = model.predict()

        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        saver.restore(sess, ckpt_dir + 'senceCls-' + str(epoch))
        sess.run(tf.local_variables_initializer())

        for idx in tqdm(range(1, 89234)):
            img = cv2.imread(os.path.join(img_path, str(idx).zfill(5) + '.jpg'))
            image = cv2.resize(img, (224, 224))
            img_data = image.astype(np.float32) / 255.0 * 2.0

            classes_index, scores_0 = sess.run([classes, scores], feed_dict={imgs_holder: np.reshape(img_data, [1, 224, 224, 3])})
            submit_f.write(str(idx).zfill(5) + '.jpg' + ' ' + str(classes_index[0] + 1) + '\n')

    submit_f.close()
    zf = zipfile.ZipFile('./submit/classification.zip', 'w', zipfile.ZIP_DEFLATED)
    zf.write(submit_file)
    zf.close()

    tf.reset_default_graph() 
Example 20
def loss(preds, labels):
    labels = tf.cast(labels, tf.int64)
    if PRINT_LAYER_LOG:
        print('pre labels', labels.get_shape())
    labels = tf.reshape(labels, (cfg.batch_size, -1))
    if PRINT_LAYER_LOG:
        print('labels', labels.get_shape())
    labels = tf.one_hot(labels, cfg.classes)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=preds, labels=labels)
    pred_loss = tf.reduce_mean(cross_entropy)
    return pred_loss 
Example 21
def gen_data_batch(tf_records_filename, batch_size):
    dt = tf.data.TFRecordDataset(tf_records_filename)
    dt = dt.map(parser, num_parallel_calls=4)
    dt = dt.prefetch(batch_size)
    dt = dt.shuffle(buffer_size=8*batch_size)
    dt = dt.repeat()
    dt = dt.batch(batch_size)
    iterator = dt.make_one_shot_iterator()
    imgs, true_boxes = iterator.get_next()

    return imgs, true_boxes 
Example 22
def _make_batch_generator(self):
        # data load and construct batch generator
        self.logger.info("Creating dataset...")
        trainset_loader = []
        for i in range(len(cfg.trainset)):
            trainset_loader.append(DatasetLoader(eval(cfg.trainset[i])("train"), True, transforms.Compose([\
                                                                                                        transforms.ToTensor(),
                                                                                                        transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)]\
                                                                                                        )))
        trainset_loader = MultipleDatasets(trainset_loader) 
        self.itr_per_epoch = math.ceil(len(trainset_loader) / cfg.num_gpus / cfg.batch_size)
        self.batch_generator = DataLoader(dataset=trainset_loader, batch_size=cfg.num_gpus*cfg.batch_size, shuffle=True, num_workers=cfg.num_thread, pin_memory=True) 
Example 23
def _make_batch_generator(self):
        # data load and construct batch generator
        self.logger.info("Creating dataset...")
        testset = eval(cfg.testset)("test")
        testset_loader = DatasetLoader(testset, False,
                                       transforms.Compose([transforms.ToTensor(),
                                                           transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)]))
        batch_generator = DataLoader(dataset=testset_loader, batch_size=cfg.num_gpus*cfg.test_batch_size, shuffle=False, num_workers=cfg.num_thread, pin_memory=True)
        
        self.testset = testset
        self.batch_generator = batch_generator 
Example 24
def make_data(self):
        ''' Load PoseTrack data '''
        from AllJoints_PoseTrack import PoseTrackJoints
        from AllJoints_COCO import PoseTrackJoints_COCO
        from dataset import Preprocessing

        d = PoseTrackJoints()
        train_data, _ = d.load_data(cfg.min_kps)
        print(len(train_data))
        # merge COCO keypoints into the PoseTrack training set
        d2 = PoseTrackJoints_COCO()
        train_data_coco, _ = d2.load_data(cfg.min_kps)
        print(len(train_data_coco))

        train_data.extend(train_data_coco)
        print(len(train_data))

        from random import shuffle
        shuffle(train_data)

        from tfflat.data_provider import DataFromList, MultiProcessMapDataZMQ, BatchData, MapData
        dp = DataFromList(train_data)
        if cfg.dpflow_enable:
            dp = MultiProcessMapDataZMQ(dp, cfg.nr_dpflows, Preprocessing)
        else:
            dp = MapData(dp, Preprocessing)
        dp = BatchData(dp, cfg.batch_size // cfg.nr_aug)
        dp.reset_state()
        dataiter = dp.get_data()

        return dataiter 