Python源码示例:input.get()

示例1
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例2
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例3
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例4
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例5
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例6
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例7
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例8
def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size,
               is_training):
  """Builds the input pipeline for `dataset_name`/`split_name`.

  `image_size` is accepted for API compatibility but not used here.
  Shuffling is enabled only while training.
  """
  del image_size  # Unused
  scope = 'data_loading_%s/%s' % (dataset_name, split_name)
  with tf.variable_scope(scope):
    # Queue tuning for the parallel-reader pipeline (4 readers,
    # min 50 / max 256 elements in the common queue).
    provider_inputs = input_generator.get(
        dataset_dir,
        dataset_name,
        split_name,
        shuffle=is_training,
        num_readers=4,
        common_queue_min=50,
        common_queue_capacity=256)
    return _get_data_from_provider(provider_inputs, batch_size, split_name)
示例9
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例10
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例11
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例12
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例13
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例14
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例15
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例16
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例17
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例18
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例19
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例20
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例21
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例22
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs
示例23
def get_model_fn(params, is_training, reuse=False):
  """Fetches the model function from the deeprotator factory."""
  model_fn = deeprotator_factory.get(params, is_training, reuse)
  return model_fn
示例24
def get_inputs(self,
               dataset_dir,
               dataset_name,
               split_name,
               batch_size,
               image_size,
               vox_size,
               is_training=True):
    """Returns batched image/voxel tensors for a dataset split.

    `image_size` and `vox_size` are accepted for API compatibility but
    unused. Shuffling is enabled only when `is_training` is True.
    """
    del image_size, vox_size
    scope = 'data_loading_%s/%s' % (dataset_name, split_name)
    with tf.variable_scope(scope):
      # Parallel-reader pipeline: 4 readers, common queue of 64-256 items.
      raw = input_generator.get(
          dataset_dir,
          dataset_name,
          split_name,
          shuffle=is_training,
          num_readers=4,
          common_queue_min=64,
          common_queue_capacity=256)

      # Batch images and voxels together so they stay aligned.
      batched_images, batched_voxels = tf.train.batch(
          [raw['image'], raw['voxel']],
          batch_size=batch_size,
          num_threads=8,
          capacity=8 * batch_size,
          name='batching_queues/%s/%s' % (dataset_name, split_name))

      outputs = {
          'images': batched_images,
          'voxels': batched_voxels,
          'num_samples': raw['num_samples'],
      }

    return outputs