Java source code examples: org.apache.kylin.job.execution.DefaultChainedExecutable
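The snippets below, collected from the Apache Kylin code base and its tests, show typical uses of DefaultChainedExecutable: building multi-step jobs, appending steps to a job flow, and testing scheduler behavior. They all share one basic pattern. The sketch below is a minimal illustration of that pattern, not code from Kylin itself; MyStep stands in for any concrete AbstractExecutable subclass, and executableManager for an ExecutableManager instance obtained from the environment.

// Minimal sketch of the common pattern; MyStep and executableManager are
// placeholders, not Kylin classes.
DefaultChainedExecutable job = new DefaultChainedExecutable();
job.setName("my chained job");
job.addTask(new MyStep());     // tasks run sequentially, in insertion order
job.addTask(new MyStep());
executableManager.addJob(job); // persist the job and hand it to the scheduler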
Example 1
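Builds the MapReduce step that merges dictionaries for a streaming cube build. The step locks on the cube name and keeps the lock after it finishes (setIsNeedReleaseLock(false)); a later step releases it (see Example 19).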
private MapReduceExecutable createMergeDictStep(String streamingStoragePath, String jobId, DefaultChainedExecutable jobFlow) {
    MapReduceExecutable mergeDict = new MapReduceExecutable();
    mergeDict.setName(ExecutableConstants.STEP_NAME_STREAMING_CREATE_DICTIONARY);
    StringBuilder cmd = new StringBuilder();
    appendMapReduceParameters(cmd, JobEngineConfig.CUBE_MERGE_JOB_CONF_SUFFIX);
    appendExecCmdParameters(cmd, BatchConstants.ARG_JOB_NAME,
            ExecutableConstants.STEP_NAME_STREAMING_CREATE_DICTIONARY);
    appendExecCmdParameters(cmd, BatchConstants.ARG_INPUT, streamingStoragePath);
    appendExecCmdParameters(cmd, BatchConstants.ARG_CUBE_NAME, seg.getRealization().getName());
    appendExecCmdParameters(cmd, BatchConstants.ARG_SEGMENT_NAME, seg.getName());
    // The trySaveNewDict API is used instead of the MR job output, so the output path is unused here
    appendExecCmdParameters(cmd, BatchConstants.ARG_OUTPUT, getDictPath(jobId));
    final String cubeName = CubingExecutableUtil.getCubeName(jobFlow.getParams());
    mergeDict.setMapReduceParams(cmd.toString());
    mergeDict.setMapReduceJobClass(MergeDictJob.class);
    mergeDict.setLockPathName(cubeName);
    mergeDict.setIsNeedLock(true);
    mergeDict.setIsNeedReleaseLock(false);
    mergeDict.setJobFlowJobId(jobFlow.getId());
    return mergeDict;
}
Example 2
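A scheduler test that shuts the scheduler down while a five-second task is still running, then expects the wait for job completion to fail with a RuntimeException.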
@Test
public void testSchedulerStop() throws Exception {
    logger.info("testSchedulerStop");
    thrown.expect(RuntimeException.class);
    thrown.expectMessage("too long wait time");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new FiveSecondSucceedTestExecutable();
    job.addTask(task1);
    execMgr.addJob(job);
    // sleep 3s to make sure the task is running
    Thread.sleep(3000);
    // simulate the scheduler failing for some reason
    scheduler.shutdown();
    waitForJobFinish(job.getId(), 6000);
}
Example 3
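Appends a MapReduce step to the job flow that converts a materialized lookup table snapshot into HBase HFiles.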
private void addLookupTableConvertToHFilesStep(DefaultChainedExecutable jobFlow, String tableName, String snapshotID) {
    MapReduceExecutable createHFilesStep = new MapReduceExecutable();
    createHFilesStep
            .setName(ExecutableConstants.STEP_NAME_MATERIALIZE_LOOKUP_TABLE_CONVERT_HFILE + ":" + tableName);
    StringBuilder cmd = new StringBuilder();
    appendMapReduceParameters(cmd);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_CUBE_NAME, cube.getName());
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_OUTPUT,
            getLookupTableHFilePath(tableName, jobFlow.getId()));
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_TABLE_NAME, tableName);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_CUBING_JOB_ID, jobFlow.getId());
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_LOOKUP_SNAPSHOT_ID, snapshotID);
    JobBuilderSupport.appendExecCmdParameters(cmd, BatchConstants.ARG_JOB_NAME,
            "Kylin_LookupTable_HFile_Generator_" + tableName + "_Step");
    createHFilesStep.setMapReduceParams(cmd.toString());
    createHFilesStep.setMapReduceJobClass(LookupTableToHFileJob.class);
    createHFilesStep.setCounterSaveAs(BatchConstants.LOOKUP_EXT_SNAPSHOT_SRC_RECORD_CNT_PFX + tableName);
    jobFlow.addTask(createHFilesStep);
}
Example 4
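Deserializes an ExecutablePO into an AbstractExecutable via reflection, recursing into sub-tasks; a PO that carries sub-tasks must map to a DefaultChainedExecutable.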
private static AbstractExecutable parseTo(ExecutablePO executablePO) {
    if (executablePO == null) {
        return null;
    }
    String type = executablePO.getType();
    try {
        Class<? extends AbstractExecutable> clazz = ClassUtil.forName(type, AbstractExecutable.class);
        Constructor<? extends AbstractExecutable> constructor = clazz.getConstructor();
        AbstractExecutable result = constructor.newInstance();
        result.setId(executablePO.getUuid());
        result.setName(executablePO.getName());
        result.setParams(executablePO.getParams());
        List<ExecutablePO> tasks = executablePO.getTasks();
        if (tasks != null && !tasks.isEmpty()) {
            Preconditions.checkArgument(result instanceof DefaultChainedExecutable);
            for (ExecutablePO subTask : tasks) {
                ((DefaultChainedExecutable) result).addTask(parseTo(subTask));
            }
        }
        return result;
    } catch (ReflectiveOperationException e) {
        throw new IllegalArgumentException("cannot parse this job:" + executablePO.getId(), e);
    }
}
Example 5
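Appends a step that tells the query servers to refresh their cached copy of a lookup table snapshot.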
private void addUpdateSnapshotQueryCacheStep(DefaultChainedExecutable jobFlow, String tableName, String snapshotID) {
    UpdateSnapshotCacheForQueryServersStep updateSnapshotCacheStep = new UpdateSnapshotCacheForQueryServersStep();
    updateSnapshotCacheStep.setName(ExecutableConstants.STEP_NAME_LOOKUP_SNAPSHOT_CACHE_UPDATE + ":" + tableName);
    LookupExecutableUtil.setProjectName(cube.getProject(), updateSnapshotCacheStep.getParams());
    LookupExecutableUtil.setLookupTableName(tableName, updateSnapshotCacheStep.getParams());
    LookupExecutableUtil.setLookupSnapshotID(snapshotID, updateSnapshotCacheStep.getParams());
    jobFlow.addTask(updateSnapshotCacheStep);
}
Example 6
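Appends garbage-collection steps after a segment merge: one to clean up the merged segments' HTables and one to delete their HDFS paths.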
public void addMergingGarbageCollectionSteps(DefaultChainedExecutable jobFlow) {
    String jobId = jobFlow.getId();
    MergeGCStep hBaseGCStep = createHBaseGCStep(getMergingHTables());
    jobFlow.addTask(hBaseGCStep);
    List<String> toDeletePaths = new ArrayList<>();
    toDeletePaths.addAll(getMergingHDFSPaths());
    toDeletePaths.add(getHFilePath(jobId));
    HDFSPathGarbageCollectionStep step = createHDFSPathGCStep(toDeletePaths, jobId);
    jobFlow.addTask(step);
}
Example 7
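The cleanup phase of a Hive input side: drops the intermediate flat table, and, when MR-Hive dictionary columns are configured, the distinct-value and dictionary tables as well.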
@Override
public void addStepPhase4_Cleanup(DefaultChainedExecutable jobFlow) {
    final String jobWorkingDir = getJobWorkingDir(jobFlow, hdfsWorkingDir);
    org.apache.kylin.source.hive.GarbageCollectionStep step = new org.apache.kylin.source.hive.GarbageCollectionStep();
    step.setName(ExecutableConstants.STEP_NAME_HIVE_CLEANUP);
    List<String> deleteTables = new ArrayList<>();
    deleteTables.add(getIntermediateTableIdentity());
    // the MR-Hive dict and inner tables only need to be dropped; their HDFS data needs no separate deletion
    String[] mrHiveDicts = flatDesc.getSegment().getConfig().getMrHiveDictColumns();
    if (Objects.nonNull(mrHiveDicts) && mrHiveDicts.length > 0) {
        String dictDb = flatDesc.getSegment().getConfig().getMrHiveDictDB();
        String tableName = dictDb + "." + flatDesc.getTableName()
                + flatDesc.getSegment().getConfig().getMrHiveDistinctValueTableSuffix();
        String tableName2 = dictDb + "." + flatDesc.getTableName()
                + flatDesc.getSegment().getConfig().getMrHiveDictTableSuffix();
        deleteTables.add(tableName);
        deleteTables.add(tableName2);
    }
    step.setIntermediateTables(deleteTables);
    step.setExternalDataPaths(Collections.singletonList(JoinedFlatTable.getTableDir(flatDesc, jobWorkingDir)));
    step.setHiveViewIntermediateTableIdentities(StringUtil.join(hiveViewIntermediateTables, ","));
    jobFlow.addTask(step);
}
Example 8
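Round-trip test: persists a two-task chained job, reloads it by id, and checks that the reloaded job equals the original.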
@Test
public void testDefaultChainedExecutable() throws Exception {
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    job.addTask(new SucceedTestExecutable());
    job.addTask(new SucceedTestExecutable());
    service.addJob(job);
    assertEquals(2, job.getTasks().size());
    AbstractExecutable anotherJob = service.getJob(job.getId());
    assertEquals(DefaultChainedExecutable.class, anotherJob.getClass());
    assertEquals(2, ((DefaultChainedExecutable) anotherJob).getTasks().size());
    assertJobEqual(job, anotherJob);
}
Example 9
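A no-op merge input side: the dictionary-merge phase contributes no steps to the job flow.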
@Override
public IBatchMergeInputSide getBatchMergeInputSide(ISegment seg) {
    return new IMRBatchMergeInputSide() {
        @Override
        public void addStepPhase1_MergeDictionary(DefaultChainedExecutable jobFlow) {
            // doing nothing
        }
    };
}
Example 10
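Factory method that creates a Spark job step by type, copies the parent job's parameters onto it, and registers it with the parent chain; the dist meta URL is set last because the step's id is only final after addTask.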
public static NSparkExecutable addStep(DefaultChainedExecutable parent, JobStepType type,
        CubeInstance cube) {
    NSparkExecutable step;
    KylinConfig config = cube.getConfig();
    switch (type) {
    case RESOURCE_DETECT:
        step = new NResourceDetectStep(parent);
        break;
    case CUBING:
        step = new NSparkCubingStep(config.getSparkBuildClassName());
        break;
    case MERGING:
        step = new NSparkMergingStep(config.getSparkMergeClassName());
        break;
    case CLEAN_UP_AFTER_MERGE:
        step = new NSparkUpdateMetaAndCleanupAfterMergeStep();
        break;
    default:
        throw new IllegalArgumentException();
    }
    step.setParams(parent.getParams());
    step.setProject(parent.getProject());
    step.setTargetSubject(parent.getTargetSubject());
    if (step instanceof NSparkUpdateMetaAndCleanupAfterMergeStep) {
        CubeSegment mergeSegment = cube.getSegmentById(parent.getTargetSegments().iterator().next());
        final Segments<CubeSegment> mergingSegments = cube.getMergingSegments(mergeSegment);
        step.setParam(MetadataConstants.P_SEGMENT_NAMES,
                String.join(",", NSparkCubingUtil.toSegmentNames(mergingSegments)));
        step.setParam(CubingExecutableUtil.SEGMENT_ID, parent.getParam(CubingExecutableUtil.SEGMENT_ID));
        step.setParam(MetadataConstants.P_JOB_TYPE, parent.getParam(MetadataConstants.P_JOB_TYPE));
        step.setParam(MetadataConstants.P_OUTPUT_META_URL, parent.getParam(MetadataConstants.P_OUTPUT_META_URL));
    }
    parent.addTask(step);
    // the step's id changes inside addTask, so the dist meta URL must be set afterwards
    step.setDistMetaUrl(config.getJobTmpMetaStoreUrl(parent.getProject(), step.getId()));
    return step;
}
Example 11
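Appends a step that materializes Hive views used by lookup tables; the step is only added when there are views to materialize.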
protected void addStepPhase1_DoMaterializeLookupTable(DefaultChainedExecutable jobFlow) {
    final String hiveInitStatements = JoinedFlatTable.generateHiveInitStatements(flatTableDatabase);
    final String jobWorkingDir = getJobWorkingDir(jobFlow, hdfsWorkingDir);
    AbstractExecutable task = createLookupHiveViewMaterializationStep(hiveInitStatements, jobWorkingDir,
            flatDesc, hiveViewIntermediateTables, jobFlow.getId());
    if (task != null) {
        jobFlow.addTask(task);
    }
}
Example 12
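Builds and submits a two-step chained job that computes Hive column cardinality and merges the result into the table's extended info.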
/**
 * Generate cardinality for a table. This triggers a Hadoop job.
 * The result will be merged into the table's extended info.
 *
 * @param tableName
 */
public void calculateCardinality(String tableName, String submitter) {
    String[] dbTableName = HadoopUtil.parseHiveTableName(tableName);
    tableName = dbTableName[0] + "." + dbTableName[1];
    TableDesc table = getMetadataManager().getTableDesc(tableName);
    final Map<String, String> tableExd = getMetadataManager().getTableDescExd(tableName);
    if (tableExd == null || table == null) {
        IllegalArgumentException e = new IllegalArgumentException("Cannot find table descriptor " + tableName);
        logger.error("Cannot find table descriptor " + tableName, e);
        throw e;
    }
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    job.setName("Hive Column Cardinality calculation for table '" + tableName + "'");
    job.setSubmitter(submitter);
    String outPath = HiveColumnCardinalityJob.OUTPUT_PATH + "/" + tableName;
    String param = "-table " + tableName + " -output " + outPath;
    HadoopShellExecutable step1 = new HadoopShellExecutable();
    step1.setJobClass(HiveColumnCardinalityJob.class);
    step1.setJobParams(param);
    job.addTask(step1);
    HadoopShellExecutable step2 = new HadoopShellExecutable();
    step2.setJobClass(HiveColumnCardinalityUpdateJob.class);
    step2.setJobParams(param);
    job.addTask(step2);
    getExecutableManager().addJob(job);
}
Example 13
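Resubmits a streaming segment build: appends a new segment for the parsed segment range, creates a streaming cubing job under the current user, and records the BUILDING state in the stream metadata store.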
public void resubmitJob(JobInstance job) throws IOException {
    aclEvaluate.checkProjectOperationPermission(job);
    Coordinator coordinator = Coordinator.getInstance();
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    String cubeName = job.getRelatedCube();
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    String segmentName = job.getRelatedSegmentName();
    try {
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = coordinator.getCubeManager().appendSegment(cubeInstance,
                new SegmentRange.TSRange(segmentRange.getFirst(), segmentRange.getSecond()));
        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg, aclEvaluate.getCurrentUserName());
        coordinator.getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        coordinator.getStreamMetadataStore().updateSegmentBuildState(cubeName, segmentName, state);
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        throw e;
    }
}
Example 14
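The same build-submission flow as Example 13, but triggered automatically under the SYSTEM user; it registers the job with a status checker and returns false instead of rethrowing when submission fails.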
private boolean triggerSegmentBuild(String cubeName, String segmentName) {
    CubeManager cubeManager = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    try {
        Pair<Long, Long> segmentRange = CubeSegment.parseSegmentName(segmentName);
        logger.info("submit streaming segment build, cube:{} segment:{}", cubeName, segmentName);
        CubeSegment newSeg = getCubeManager().appendSegment(cubeInstance,
                new TSRange(segmentRange.getFirst(), segmentRange.getSecond()));
        DefaultChainedExecutable executable = new StreamingCubingEngine().createStreamingCubingJob(newSeg,
                "SYSTEM");
        getExecutableManager().addJob(executable);
        CubingJob cubingJob = (CubingJob) executable;
        newSeg.setLastBuildJobID(cubingJob.getId());
        SegmentJobBuildInfo segmentJobBuildInfo = new SegmentJobBuildInfo(cubeName, segmentName, cubingJob.getId());
        jobStatusChecker.addSegmentBuildJob(segmentJobBuildInfo);
        SegmentBuildState.BuildState state = new SegmentBuildState.BuildState();
        state.setBuildStartTime(System.currentTimeMillis());
        state.setState(SegmentBuildState.BuildState.State.BUILDING);
        state.setJobId(cubingJob.getId());
        streamMetadataStore.updateSegmentBuildState(cubeName, segmentName, state);
        return true;
    } catch (Exception e) {
        logger.error("streaming job submit fail, cubeName:" + cubeName + " segment:" + segmentName, e);
        return false;
    }
}
Example 15
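A simpler cleanup phase than Example 7: drops only the intermediate flat table and its external data path.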
@Override
public void addStepPhase4_Cleanup(DefaultChainedExecutable jobFlow) {
    final String jobWorkingDir = getJobWorkingDir(jobFlow, hdfsWorkingDir);
    GarbageCollectionStep step = new GarbageCollectionStep();
    step.setName(ExecutableConstants.STEP_NAME_HIVE_CLEANUP);
    step.setIntermediateTables(Collections.singletonList(getIntermediateTableIdentity()));
    step.setExternalDataPaths(Collections.singletonList(JoinedFlatTable.getTableDir(flatDesc, jobWorkingDir)));
    step.setHiveViewIntermediateTableIdentities(StringUtil.join(hiveViewIntermediateTables, ","));
    jobFlow.addTask(step);
}
Example 16
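Integration-test helper that appends a segment, submits a batch cubing job for it, and either returns immediately in fast-build mode or waits for the job to succeed.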
private Boolean buildSegment(String cubeName, long startDate, long endDate, boolean isEmpty) throws Exception {
    CubeInstance cubeInstance = cubeManager.getCube(cubeName);
    CubeSegment segment = cubeManager.appendSegment(cubeInstance, new TSRange(0L, endDate));
    DefaultChainedExecutable job = EngineFactory.createBatchCubingJob(segment, "TEST");
    jobService.addJob(job);
    if (fastBuildMode) {
        jobSegmentMap.put(job.getId(), segment);
        jobCheckActionMap.put(job.getId(), isEmpty ? "checkEmptySegRangeInfo" : "checkNormalSegRangeInfo");
        return true;
    }
    ExecutableState state = waitForJob(job.getId());
    return ExecutableState.SUCCEED == state;
}
Example 17
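Tests scheduler fail-over: shuts down the first scheduler while a three-task job is running and verifies that every task, and the job itself, still finishes in SUCCEED.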
@Test
public void testSchedulerTakeOver() throws Exception {
    if (!lock(jobLock1, jobId2)) {
        throw new JobException("fail to get the lock");
    }
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    job.setId(jobId2);
    AbstractExecutable task1 = new SucceedTestExecutable();
    AbstractExecutable task2 = new SucceedTestExecutable();
    AbstractExecutable task3 = new SucceedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    job.addTask(task3);
    execMgr.addJob(job);
    waitForJobStatus(job.getId(), ExecutableState.RUNNING, 500);
    scheduler1.shutdown();
    scheduler1 = null;
    waitForJobFinish(job.getId());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task1.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task2.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task3.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(job.getId()).getState());
}
Example 18
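Discards a job; for a chained job, every sub-task not yet in a final state is marked DISCARDED before the job itself.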
public void discardJob(String jobId) {
    AbstractExecutable job = getJob(jobId);
    if (job instanceof DefaultChainedExecutable) {
        List<AbstractExecutable> tasks = ((DefaultChainedExecutable) job).getTasks();
        for (AbstractExecutable task : tasks) {
            if (!task.getStatus().isFinalState()) {
                updateJobOutput(task.getId(), ExecutableState.DISCARDED, null, null);
            }
        }
    }
    updateJobOutput(jobId, ExecutableState.DISCARDED, null, null);
}
Example 19
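Builds the step that saves the merged dictionaries and releases the cube-name lock taken by the merge-dictionary step in Example 1.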
private SaveDictStep createSaveDictStep(String jobId, DefaultChainedExecutable jobFlow) {
    SaveDictStep result = new SaveDictStep();
    final String cubeName = CubingExecutableUtil.getCubeName(jobFlow.getParams());
    result.setName(ExecutableConstants.STEP_NAME_STREAMING_SAVE_DICTS);
    CubingExecutableUtil.setCubeName(seg.getRealization().getName(), result.getParams());
    CubingExecutableUtil.setSegmentId(seg.getUuid(), result.getParams());
    CubingExecutableUtil.setDictsPath(getDictPath(jobId), result.getParams());
    CubingExecutableUtil.setCubingJobId(jobId, result.getParams());
    result.setIsNeedReleaseLock(true);
    result.setJobFlowJobId(jobFlow.getId());
    result.setLockPathName(cubeName);
    return result;
}
Example 20
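Happy-path test: a chained job whose two tasks succeed ends in SUCCEED, as do both tasks.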
@Test
public void testSucceed() throws Exception {
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new SucceedTestExecutable();
    BaseTestExecutable task2 = new SucceedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    jobService.addJob(job);
    waitForJobFinish(job.getId());
    assertEquals(ExecutableState.SUCCEED, jobService.getOutput(job.getId()).getState());
    assertEquals(ExecutableState.SUCCEED, jobService.getOutput(task1.getId()).getState());
    assertEquals(ExecutableState.SUCCEED, jobService.getOutput(task2.getId()).getState());
}
Example 21
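Tests discarding a running job: both the job and its self-stopping task end in DISCARDED.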
@Test
public void testDiscard() throws Exception {
    logger.info("testDiscard");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    SelfStopExecutable task1 = new SelfStopExecutable();
    job.addTask(task1);
    execMgr.addJob(job);
    Thread.sleep(1100); // give time to launch job/task1
    waitForJobStatus(job.getId(), ExecutableState.RUNNING, 500);
    execMgr.discardJob(job.getId());
    waitForJobFinish(job.getId(), MAX_WAIT_TIME);
    Assert.assertEquals(ExecutableState.DISCARDED, execMgr.getOutput(job.getId()).getState());
    Assert.assertEquals(ExecutableState.DISCARDED, execMgr.getOutput(task1.getId()).getState());
    task1.waitForDoWork();
}
Example 22
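When the first task errors, the job ends in ERROR and the second task is never run, remaining READY.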
@Test
public void testSucceedAndError() throws Exception {
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new ErrorTestExecutable();
    BaseTestExecutable task2 = new SucceedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    jobService.addJob(job);
    waitForJobFinish(job.getId());
    assertEquals(ExecutableState.ERROR, jobService.getOutput(job.getId()).getState());
    assertEquals(ExecutableState.ERROR, jobService.getOutput(task1.getId()).getState());
    assertEquals(ExecutableState.READY, jobService.getOutput(task2.getId()).getState());
}
Example 23
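When the first task succeeds and the second fails, the job ends in ERROR while the first task keeps its SUCCEED state.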
@Test
public void testSucceedAndFailed() throws Exception {
    logger.info("testSucceedAndFailed");
    DefaultChainedExecutable job = new DefaultChainedExecutable();
    BaseTestExecutable task1 = new SucceedTestExecutable();
    BaseTestExecutable task2 = new FailedTestExecutable();
    job.addTask(task1);
    job.addTask(task2);
    execMgr.addJob(job);
    waitForJobFinish(job.getId(), MAX_WAIT_TIME);
    Assert.assertEquals(ExecutableState.ERROR, execMgr.getOutput(job.getId()).getState());
    Assert.assertEquals(ExecutableState.SUCCEED, execMgr.getOutput(task1.getId()).getState());
    Assert.assertEquals(ExecutableState.ERROR, execMgr.getOutput(task2.getId()).getState());
}