Python source code examples: joblib.load()
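
joblib.load() reads back a Python object that was previously written to disk with joblib.dump(). It is widely used for objects built around large NumPy arrays, such as fitted scikit-learn models. A minimal round-trip sketch (file name chosen only for illustration):

import joblib
import numpy as np

# joblib.dump() writes the object; joblib.load() restores it.
params = {"weights": np.random.randn(3, 4), "bias": np.zeros(4)}
joblib.dump(params, "params.joblib")
restored = joblib.load("params.joblib")
assert np.allclose(restored["weights"], params["weights"])

The examples below are excerpts from open-source projects and generally rely on surrounding imports and helper functions that are not shown.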

Example 1
def load_variables(load_path, variables=None, sess=None):
    sess = sess or get_session()
    variables = variables or tf.trainable_variables()

    loaded_params = joblib.load(os.path.expanduser(load_path))
    restores = []
    if isinstance(loaded_params, list):
        assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
        for d, v in zip(loaded_params, variables):
            restores.append(v.assign(d))
    else:
        for v in variables:
            restores.append(v.assign(loaded_params[v.name]))

    sess.run(restores)
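
This excerpt assumes the checkpoint at load_path was saved with joblib.dump, either as a list of parameter arrays or as a dict keyed by variable name. A hedged sketch of a matching save helper (not part of the excerpt; same assumptions about get_session and tf) might look like:

def save_variables(save_path, variables=None, sess=None):
    # Dump {variable_name: value} so load_variables() above can restore by name.
    sess = sess or get_session()
    variables = variables or tf.trainable_variables()
    save_path = os.path.expanduser(save_path)
    values = sess.run(variables)
    params = {v.name: value for v, value in zip(variables, values)}
    dirname = os.path.dirname(save_path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    joblib.dump(params, save_path)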

Example 2
def main():
    test_args = parse_args()

    args = joblib.load('models/%s/args.pkl' %test_args.name)

    folds = []
    losses = []
    scores = []
    for fold in range(args.n_splits):
        log_path = 'models/%s/log_%d.csv' %(args.name, fold+1)
        if not os.path.exists(log_path):
            continue
        log = pd.read_csv(log_path)
        loss, score = log.loc[log['val_loss'].values.argmin(), ['val_loss', 'val_score']].values
        print(loss, score)
        folds.append(str(fold+1))
        losses.append(loss)
        scores.append(score)
    results = pd.DataFrame({
        'fold': folds + ['mean'],
        'loss': losses + [np.mean(losses)],
        'score': scores + [np.mean(scores)],
    })
    print(results)
    results.to_csv('models/%s/results.csv' % args.name, index=False) 
Example 3
def load_from_disk(filename):
  """Load a dataset from file."""
  name = filename
  if os.path.splitext(name)[1] == ".gz":
    name = os.path.splitext(name)[0]
  extension = os.path.splitext(name)[1]
  if extension == ".pkl":
    return load_pickle_from_disk(filename)
  elif extension == ".joblib":
    return joblib.load(filename)
  elif extension == ".csv":
    # First line of user-specified CSV *must* be header.
    df = pd.read_csv(filename, header=0)
    df = df.replace(np.nan, str(""), regex=True)
    return df
  elif extension == ".npy":
    return np.load(filename, allow_pickle=True)
  else:
    raise ValueError("Unrecognized filetype for %s" % filename) 
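
The ".joblib" branch above is a plain joblib.load call, which also transparently decompresses files that joblib.dump wrote with compression. A small illustrative sketch:

import joblib
import pandas as pd

df = pd.DataFrame({"smiles": ["CCO", "CCN"], "activity": [1.2, 3.4]})

# compress=3 trades a little CPU for a smaller file;
# joblib.load() detects the compression automatically.
joblib.dump(df, "dataset.joblib", compress=3)
df_again = joblib.load("dataset.joblib")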
Example 4
def load_trajectories(filenames, max_steps=None):
    assert len(filenames) > 0
    paths = []
    for filename in filenames:
        paths.append(joblib.load(filename))

    def get_obs_and_act(path):
        obses = path['obs'][:-1]
        next_obses = path['obs'][1:]
        actions = path['act'][:-1]
        if max_steps is not None:
            return obses[:max_steps], next_obses[:max_steps], actions[:max_steps-1]
        else:
            return obses, next_obses, actions

    for i, path in enumerate(paths):
        if i == 0:
            obses, next_obses, acts = get_obs_and_act(path)
        else:
            obs, next_obs, act = get_obs_and_act(path)
            obses = np.vstack((obs, obses))
            next_obses = np.vstack((next_obs, next_obses))
            acts = np.vstack((act, acts))
    return {'obses': obses, 'next_obses': next_obses, 'acts': acts} 
Example 5
def load_from_disk(filename):
  """Load a dataset from file."""
  name = filename
  if os.path.splitext(name)[1] == ".gz":
    name = os.path.splitext(name)[0]
  if os.path.splitext(name)[1] == ".pkl":
    return load_pickle_from_disk(filename)
  elif os.path.splitext(name)[1] == ".joblib":
    try:
      return joblib.load(filename)
    except KeyError:
      # Try older joblib version for legacy files.
      return old_joblib.load(filename)
    except ValueError:
      return old_joblib.load(filename)
  elif os.path.splitext(name)[1] == ".csv":
    # First line of user-specified CSV *must* be header.
    df = pd.read_csv(filename, header=0)
    df = df.replace(np.nan, str(""), regex=True)
    return df
  else:
    raise ValueError("Unrecognized filetype for %s" % filename) 
Example 6
def load_cv_dataset_from_disk(save_dir, fold_num):
  assert fold_num > 1
  loaded = False
  train_data = []
  valid_data = []
  for i in range(fold_num):
    fold_dir = os.path.join(save_dir, "fold" + str(i + 1))
    train_dir = os.path.join(fold_dir, "train_dir")
    valid_dir = os.path.join(fold_dir, "valid_dir")
    if not os.path.exists(train_dir) or not os.path.exists(valid_dir):
      return False, None, list()
    train = dcCustom.data.DiskDataset(train_dir)
    valid = dcCustom.data.DiskDataset(valid_dir)
    train_data.append(train)
    valid_data.append(valid)
  
  loaded = True  
  with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
    transformers = pickle.load(f)
    return loaded, list(zip(train_data, valid_data)), transformers 
Example 7
def fit_fold_parallel(*args, **kwargs):
     verbose = args[-1]
     data_path = '{}/{}'.format(args[4], str(uuid.uuid4()))
     cmd = "python3 models/apex/fit_fold.py \
                  --data_path='{}' \
                  --args='{}' \
                  --kwargs='{}'".format(data_path, json.dumps(args), json.dumps(kwargs))
            
     p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
     for line in iter(p.stdout.readline, ''):
          if verbose:
            logger.info(line.strip())
          else:
            sys.stdout.write('\r{0: <140}'.format(line.strip())[:140])
            sys.stdout.flush()

     sys.stdout.write('\r{0: <140}'.format(''))
     sys.stdout.flush()
     retval = p.wait()

     time.sleep(10)

     return joblib.load(data_path) 
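
The parent process only reads data_path; the child script models/apex/fit_fold.py (not shown) is expected to write its result there. Under that assumption, a hypothetical tail of the child script could be:

import argparse
import json
import joblib

# Hypothetical sketch: hand the fold's result back to the parent, which
# joblib.load()s exactly this path after the subprocess exits.
parser = argparse.ArgumentParser()
parser.add_argument("--data_path")
parser.add_argument("--args")
parser.add_argument("--kwargs")
cli = parser.parse_args()

fit_args = json.loads(cli.args)
fit_kwargs = json.loads(cli.kwargs)
result = {"status": "done"}  # real training results would go here
joblib.dump(result, cli.data_path)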
Example 8
def load_tf_policy(fpath, itr, deterministic=False):
    """ Load a tensorflow policy saved with Spinning Up Logger."""

    fname = osp.join(fpath, 'tf1_save'+itr)
    print('\n\nLoading from %s.\n\n'%fname)

    # load the things!
    sess = tf.Session()
    model = restore_tf_graph(sess, fname)

    # get the correct op for executing actions
    if deterministic and 'mu' in model.keys():
        # 'deterministic' is only a valid option for SAC policies
        print('Using deterministic action op.')
        action_op = model['mu']
    else:
        print('Using default action op.')
        action_op = model['pi']

    # make function for producing an action given a single state
    get_action = lambda x : sess.run(action_op, feed_dict={model['x']: x[None,:]})[0]

    return get_action 
Example 9
def load_pytorch_policy(fpath, itr, deterministic=False):
    """ Load a pytorch policy saved with Spinning Up Logger."""
    
    fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt')
    print('\n\nLoading from %s.\n\n'%fname)

    model = torch.load(fname)

    # make function for producing an action given a single state
    def get_action(x):
        with torch.no_grad():
            x = torch.as_tensor(x, dtype=torch.float32)
            action = model.act(x)
        return action

    return get_action 
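
Both loaders return a get_action(obs) closure. A hypothetical single-episode rollout using it (environment name, run directory and iteration suffix are placeholders; old Gym step API assumed):

import gym

env = gym.make("HalfCheetah-v2")
get_action = load_pytorch_policy("path/to/run", itr="", deterministic=True)

obs, done, ep_ret = env.reset(), False, 0.0
while not done:
    obs, reward, done, _ = env.step(get_action(obs))
    ep_ret += reward
print("episode return:", ep_ret)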
Example 10
def loadmodel(self, nameprefix):
        """ Load the classification model together with the topic model.

        :param nameprefix: prefix of the paths of the model files
        :return: None
        :type nameprefix: str
        """
        self.topicmodeler.loadmodel(nameprefix)
        self.classifier = joblib.load(nameprefix+'.pkl')
        # for backward compatibility, shorttext<1.0.0 does not have _classlabels.txt
        if os.path.exists(nameprefix+'_classlabels.txt'):
            labelfile = open(nameprefix+'_classlabels.txt', 'r')
            self.classlabels = [s.strip() for s in labelfile.readlines()]
            labelfile.close()
        else:
            self.classlabels = self.topicmodeler.classlabels 
Example 11
def test_calculate_illumination_raster(monkeypatch):

    # The generate latlon array function is massively time-consuming.
    # This replaces it with precomputed data.
    def mock_latlon(foo, bar, baz):
        lat = joblib.load("test_data/lat_array_indo")
        lon = joblib.load("test_data/lon_array_indo")
        return lat, lon
    monkeypatch.setattr(terrain_correction, "_generate_latlon_arrays", mock_latlon)

    os.chdir(pathlib.Path(__file__).parent)
    dem_path = "test_data/dem_test_indonesia.tif"
    raster_timezone = pytz.timezone("Asia/Jakarta")
    raster_datetime = dt.datetime(2019, 6, 1, 12, 00, 00, tzinfo=raster_timezone)
    out_path = "test_outputs/illumination_indonesia.tif"
    terrain_correction.calculate_illumination_condition_array(dem_path, raster_datetime, out_path) 
Example 12
def load_local_or_remote_file(filepath, file_type=None):
    local_path = local_path_from_s3_or_local_path(filepath)
    if local_path is None:
        return None
    if file_type is None:
        extension = local_path.split('.')[-1]
        if extension == 'npy':
            file_type = NUMPY
        else:
            file_type = PICKLE
    else:
        file_type = PICKLE
    if file_type == NUMPY:
        object = np.load(open(local_path, "rb"))
    elif file_type == JOBLIB:
        object = joblib.load(local_path)
    else:
        object = pickle.load(open(local_path, "rb"))
    print("loaded", local_path)
    return object 
Example 13
def main():
    cap = cv2.VideoCapture(0)
    face_recogniser = joblib.load(MODEL_PATH)
    preprocess = preprocessing.ExifOrientationNormalize()

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)

        img = Image.fromarray(frame)
        faces = face_recogniser(preprocess(img))
        if faces is not None:
            draw_bb_on_img(faces, img)

        # Display the resulting frame
        cv2.imshow('video', np.array(img))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows() 
Example 14
def _copy_model(self, dest_dir):
        """Copies the files needed to recreate a DeepChem NN model from the current model
        directory to a destination directory.

        Args:
            dest_dir (str): The destination directory for the model files
        """

        chkpt_file = os.path.join(self.model_dir, 'checkpoint')
        with open(chkpt_file, 'r') as chkpt_in:
            chkpt_dict = yaml.load(chkpt_in.read())
        chkpt_prefix = chkpt_dict['model_checkpoint_path']
        files = [chkpt_file]
        files.append(os.path.join(self.model_dir, 'model.pickle'))
        files.append(os.path.join(self.model_dir, '%s.index' % chkpt_prefix))
        files.append(os.path.join(self.model_dir, '%s.meta' % chkpt_prefix))
        files = files + glob.glob(os.path.join(self.model_dir, '%s.data-*' % chkpt_prefix))
        self._clean_up_excess_files(dest_dir)
        for file in files:
            shutil.copy2(file, dest_dir)
        self.log.info("Saved model files to '%s'" % dest_dir)


    # **************************************************************************************** 
Example 15
def save(self, filename, ensure_compatibility = True):
        """
        Pickle a class instance. E.g., corex.save('saved.pkl')
        When set to True, ensure_compatibility resets self.words before saving
        a pickle to avoid Unicode loading issues usually seen when trying to load
        the pickle from a Python 2 implementation.
        It is recommended to set it to False if you know you are going to load the
        model in an all Python 3 implementation as self.words is required for fetching
        the topics via get_topics().
        """
        # Avoid saving words with object.
        #TODO: figure out why Unicode sometimes causes an issue with loading after pickling
        temp_words = self.words
        if ensure_compatibility and (self.words is not None):
            self.words = None

        # Save CorEx object
        import pickle
        if path.dirname(filename) and not path.exists(path.dirname(filename)):
            makedirs(path.dirname(filename))
        pickle.dump(self, open(filename, 'wb'), protocol=-1)
        # Restore words to CorEx object
        self.words = temp_words 
Example 16
def like(self):
        # like and dislike Tinder profiles using your trained logistic
        # model. Note this requires that you first run tindetheus browse to
        # build a database. Then run tindetheus train to train a model.

        # load the pretrained model
        self.model = joblib.load('log_reg_model.pkl')

        while self.likes_left > 0:
            try:
                users = self.session.nearby_users()
                self.like_or_dislike_users(users)
            except RecsTimeout:
                self.search_distance += 5
                self.session.profile.distance_filter += 5
                self.like() 
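
The log_reg_model.pkl read above is presumably a scikit-learn classifier written out during the training step. A hedged sketch of producing such a file, with random placeholder features standing in for real face embeddings:

import joblib
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.random.randn(200, 128)          # placeholder feature vectors
y = np.random.randint(0, 2, size=200)  # placeholder like/dislike labels

model = LogisticRegression(max_iter=1000).fit(X, y)
joblib.dump(model, 'log_reg_model.pkl')  # later restored via joblib.load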
Example 17
def read_object(filename):
    if OUTPUT_DIR is None:
        print("No output directory set but attempted to read " + filename)
    else:
        # first try OUTPUT_DIR then FINAL_OUTPUT_DIR
        path = os.path.join(OUTPUT_DIR, filename)
        if not os.path.isfile(path):
            if FINAL_OUTPUT_DIR is not None:
                path = os.path.join(FINAL_OUTPUT_DIR, filename)
                if not os.path.isfile(path):
                    raise Exception("File %s cannot be found in either output dir (%s) or final output dir (%s)"
                        % (filename, OUTPUT_DIR, FINAL_OUTPUT_DIR))
            else:
                raise Exception("File %s cannot be found in output dir (%s)"
                    % (filename, OUTPUT_DIR))
        obj = joblib.load(path)
    return obj 
Example 18
def plot_two(k=1):
    res_entropy = joblib.load('res_entropy.pkl')
    itos_entropy = joblib.load('itos_entropy.pkl')

    res_l2 = joblib.load('res_l2.pkl')
    itos_l2 = joblib.load('itos_l2.pkl')

    fig, axes = plt.subplots(2, 1, figsize=(8, 7))
    plot(axes[0], res_entropy, itos_entropy, k)
    axes[0].annotate('Entropy regularization',
                     xy=(.5, 1.02),
                     xycoords='axes fraction',
                     ha='center', va='bottom', fontsize=16)

    axes[0].set_xticks([])
    plot(axes[1], res_l2, itos_l2, k)
    axes[1].annotate('L2 regularization',
                     xy=(.5, 1.02),
                     xycoords='axes fraction',
                     ha='center', va='bottom', fontsize=16)
    fig.subplots_adjust(bottom=0.18, top=0.93, right=0.98,
                        left=0.1,
                        hspace=0.15, )
    plt.savefig('ner_%s.pdf' % k)
    plt.show() 
Example 19
def load(output_dir_path='', out_file_name=''):
    """load."""
    full_out_file_name = os.path.join(output_dir_path, out_file_name) + ".pkl"
    obj = joblib.load(full_out_file_name)
    return obj 
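
A matching save helper (not in the excerpt) would presumably mirror the same path construction:

import os
import joblib


def save(obj, output_dir_path='', out_file_name=''):
    """save (hypothetical counterpart of load above)."""
    full_out_file_name = os.path.join(output_dir_path, out_file_name) + ".pkl"
    joblib.dump(obj, full_out_file_name)
    return full_out_file_name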
Example 20
def load(self, obj):
        """load."""
        self.__dict__.update(joblib.load(obj).__dict__) 
Example 21
def load(filepath):
    """Returns an object stored via `save`
    """

    obj = joblib.load(filepath)

    return obj 
Example 22
def load_variables(load_path, variables=None, sess=None):
    sess = sess or get_session()
    variables = variables or tf.trainable_variables()

    loaded_params = joblib.load(os.path.expanduser(load_path))
    restores = []
    for v in variables:
        restores.append(v.assign(loaded_params[v.name]))
    sess.run(restores)


Example 23
def mmb_init_model(self, modelRebuild=False, exclude=None, labeled_df=None):
        """
        Initiates the machine learning models used in order to begin making predictions.

        :param modelRebuild: boolean used to rebuild the model by looking for new samples
        on disk or just load the old model without checking for new samples.  If no
        saved models are found, it will attempt to rebuild from samples in the model directories.
        :param exclude: if samples used in the model contain this string,
        they will be omitted from the model.  This is primarily used to hold malware
        families from consideration in the model to test the algorithm for classification generalization
        to unknown families and techniques.
        :param labeled_df: used mostly internally to test out new models.  If the dataframe has 
        at least columns for ['label', 'extracted_vba', 'md5'], then this dataframe will be used
        to rebuild the model.
        :return: True if successful and False otherwise.
        """
        if labeled_df:
            self.modeldata = labeled_df
            self.clear_model_features()
            self.get_language_features()
            self.build_models()
            modelsLoaded = True
        else:
            modelsLoaded = self.load_model()
            if modelRebuild or not modelsLoaded:
                newdoc_cnt = self.load_model_data(exclude)
                if newdoc_cnt > 0:
                    self.clear_model_features()
                    self.get_language_features()
                    self.build_models()
                    modelsLoaded = self.save_model()
                if (self.modeldata is None) or (len(self.modeldata) == 0):
                    logging.error("""No model data found, supervised machine learning requires
                                     labeled samples.  Check that samples exist in the benign_samples and
                                     malicious_samples directories and that existing model files with .pickle
                                     extensions exist in the existsmodels""")
                    modelsLoaded = False
        return modelsLoaded 
Example 24
def load(filepath):
        return joblib.load(filepath) 
Example 25
def load_pickle_from_disk(filename):
  """Load dataset from pickle file."""
  if ".gz" in filename:
    with gzip.open(filename, "rb") as f:
      df = pickle.load(f)
  else:
    with open(filename, "rb") as f:
      df = pickle.load(f)
  return df 
Example 26
def load_dataset_from_disk(save_dir):
  """
  Parameters
  ----------
  save_dir: str

  Returns
  -------
  loaded: bool
    Whether the load succeeded
  all_dataset: (dc.data.Dataset, dc.data.Dataset, dc.data.Dataset)
    The train, valid, test datasets
  transformers: list of dc.trans.Transformer
    The transformers used for this dataset

  """

  train_dir = os.path.join(save_dir, "train_dir")
  valid_dir = os.path.join(save_dir, "valid_dir")
  test_dir = os.path.join(save_dir, "test_dir")
  if not os.path.exists(train_dir) or not os.path.exists(
      valid_dir) or not os.path.exists(test_dir):
    return False, None, list()
  loaded = True
  train = deepchem.data.DiskDataset(train_dir)
  valid = deepchem.data.DiskDataset(valid_dir)
  test = deepchem.data.DiskDataset(test_dir)
  train.memory_cache_size = 40 * (1 << 20)  # 40 MB
  all_dataset = (train, valid, test)
  with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
    transformers = pickle.load(f)
    return loaded, all_dataset, transformers