import torch
import torch.utils.data
from torch.autograd import Variable

import dataset  # project-local LMDB dataset, sampler and collate helpers
import utils    # project-local CTC label converter
import params   # project-local hyperparameters

# `args` (the parsed command-line arguments) and `alphabet` are expected to be
# defined at module level before these functions are called.


def data_loader():
    # train
    train_dataset = dataset.lmdbDataset(root=args.trainroot)
    assert train_dataset
    if not params.random_sample:
        sampler = dataset.randomSequentialSampler(train_dataset, params.batchSize)
    else:
        sampler = None
    # shuffle and a custom sampler are mutually exclusive in DataLoader,
    # so only shuffle when no sampler is supplied
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=params.batchSize,
        shuffle=(sampler is None), sampler=sampler,
        num_workers=int(params.workers),
        collate_fn=dataset.alignCollate(imgH=params.imgH, imgW=params.imgW, keep_ratio=params.keep_ratio))
    # val
    val_dataset = dataset.lmdbDataset(
        root=args.valroot,
        transform=dataset.resizeNormalize((params.imgW, params.imgH)))
    assert val_dataset
    val_loader = torch.utils.data.DataLoader(
        val_dataset, shuffle=True, batch_size=params.batchSize,
        num_workers=int(params.workers))
    return train_loader, val_loader
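
# Illustrative only: one way the loaders might be consumed. The training loop,
# model and CTC criterion live elsewhere in the project; the names below are
# assumptions, not part of this file.
#
#   train_loader, val_loader = data_loader()
#   for images, texts in train_loader:  # alignCollate yields (image batch, label strings)
#       preds = model(images)
#       ...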


def crnn_recognition(cropped_image, model):
    """Recognize the text in a single cropped image with CTC greedy decoding."""
    converter = utils.strLabelConverter(alphabet)

    # convert to grayscale, then resize and normalize to the network input size (W=280, H=32)
    image = cropped_image.convert('L')
    transformer = dataset.resizeNormalize((280, 32))
    image = transformer(image)
    # image = image.cuda()  # move the tensor to the GPU here if the model lives on the GPU
    image = image.view(1, *image.size())  # add a batch dimension: (1, C, H, W)
    image = Variable(image)
    # inference only: switch to eval mode and disable gradient tracking
    model.eval()
    with torch.no_grad():
        preds = model(image)

    # greedy decoding: take the most likely class at each time step, flatten to a
    # 1-D sequence, then let the converter collapse repeats and CTC blanks
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
    print('results: {0}'.format(sim_pred))
    return sim_pred
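

# Minimal usage sketch, not part of the original script: the `crnn` module,
# the checkpoint path and the image path below are assumptions/placeholders.
if __name__ == '__main__':
    from PIL import Image
    import crnn  # project-local CRNN model definition (assumed)

    nclass = len(alphabet) + 1             # +1 for the CTC blank symbol
    model = crnn.CRNN(32, 1, nclass, 256)  # assumed constructor: (imgH, nc, nclass, nh)
    model.load_state_dict(torch.load('crnn_models/CRNN.pth', map_location='cpu'))  # placeholder path

    cropped = Image.open('demo.png')       # placeholder image path
    crnn_recognition(cropped, model)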