Java Source Code Examples: org.apache.flink.api.java.hadoop.mapreduce.utils.HadoopUtils
Example 1
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table, HiveTablePartition hiveTablePartition,
        Properties tableProperties, boolean overwrite) {
    super(jobConf.getCredentials());
    Preconditions.checkNotNull(table, "table cannot be null");
    Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
    Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");
    HadoopUtils.mergeHadoopConf(jobConf);
    this.jobConf = jobConf;
    this.tablePath = tablePath;
    this.partitionColumns = table.getPartitionKeys();
    TableSchema tableSchema = table.getSchema();
    this.fieldNames = tableSchema.getFieldNames();
    this.fieldTypes = tableSchema.getFieldDataTypes();
    this.hiveTablePartition = hiveTablePartition;
    this.tableProperties = tableProperties;
    this.overwrite = overwrite;
    isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
    isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
    hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
            "Hive version is not defined");
}
Example 2
public HadoopInputFormatBase(org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat, Class<K> key, Class<V> value, Job job) {
    super(Preconditions.checkNotNull(job, "Job can not be null").getCredentials());
    this.mapreduceInputFormat = Preconditions.checkNotNull(mapreduceInputFormat);
    this.keyClass = Preconditions.checkNotNull(key);
    this.valueClass = Preconditions.checkNotNull(value);
    this.configuration = job.getConfiguration();
    HadoopUtils.mergeHadoopConf(configuration);
}
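The constructor above only stores the wrapped mapreduce InputFormat and merges the Hadoop configuration. A minimal usage sketch, assuming Flink's DataSet API and the concrete HadoopInputFormat wrapper from flink-hadoop-compatibility (the input path is a placeholder):

// Minimal usage sketch: wrapping Hadoop's mapreduce TextInputFormat in Flink's HadoopInputFormat
// (org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat); the HDFS path is a placeholder.
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
Job job = Job.getInstance();
FileInputFormat.addInputPath(job, new Path("hdfs:///path/to/input"));
HadoopInputFormat<LongWritable, Text> inputFormat =
        new HadoopInputFormat<>(new TextInputFormat(), LongWritable.class, Text.class, job);
DataSet<Tuple2<LongWritable, Text>> lines = env.createInput(inputFormat);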
Example 3
/**
 * Creates a HCatInputFormat for the given database, table, and
 * {@link org.apache.hadoop.conf.Configuration}.
 * By default, the InputFormat returns {@link org.apache.hive.hcatalog.data.HCatRecord}.
 * The return type of the InputFormat can be changed to Flink-native tuples by calling
 * {@link HCatInputFormatBase#asFlinkTuples()}.
 *
 * @param database The name of the database to read from.
 * @param table The name of the table to read.
 * @param config The Configuration for the InputFormat.
 * @throws java.io.IOException
 */
public HCatInputFormatBase(String database, String table, Configuration config) throws IOException {
    super();
    this.configuration = config;
    HadoopUtils.mergeHadoopConf(this.configuration);

    this.hCatInputFormat = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput(this.configuration, database, table);
    this.outputSchema = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.getTableSchema(this.configuration);

    // configure output schema of HCatFormat
    configuration.set("mapreduce.lib.hcat.output.schema", HCatUtil.serialize(outputSchema));
    // set type information
    this.resultType = new WritableTypeInfo(DefaultHCatRecord.class);
}
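A minimal usage sketch for this constructor, assuming the concrete HCatInputFormat subclass from flink-hcatalog; the database and table names are placeholders:

// Minimal usage sketch (org.apache.flink.hcatalog.java.HCatInputFormat is the concrete
// subclass of HCatInputFormatBase; "mydb" and "mytable" are placeholders).
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
HCatInputFormat<HCatRecord> hcatFormat =
        new HCatInputFormat<>("mydb", "mytable", new Configuration());
DataSet<HCatRecord> records = env.createInput(hcatFormat);
// Optionally project fields and read them as Flink tuples instead of HCatRecords:
// hcatFormat.getFields("field_a", "field_b").asFlinkTuples();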
Example 4
public HadoopOutputFormatBase(org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
    super(job.getCredentials());
    this.mapreduceOutputFormat = mapreduceOutputFormat;
    this.configuration = job.getConfiguration();
    HadoopUtils.mergeHadoopConf(configuration);
}
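A minimal usage sketch for the output side, assuming the concrete HadoopOutputFormat wrapper from flink-hadoop-compatibility and Hadoop's mapreduce TextOutputFormat; the "result" DataSet and the output path are placeholders:

// Minimal usage sketch: writing a DataSet<Tuple2<Text, IntWritable>> through Hadoop's
// TextOutputFormat; "result" and the output path below are placeholders.
Job job = Job.getInstance();
HadoopOutputFormat<Text, IntWritable> outputFormat =
        new HadoopOutputFormat<>(new TextOutputFormat<Text, IntWritable>(), job);
FileOutputFormat.setOutputPath(job, new Path("hdfs:///path/to/output"));
result.output(outputFormat);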