Java source code examples: org.apache.hadoop.hdfs.protocol.ClientProtocol
Example 1
/**
* This method retrieves all the datanodes of an HDFS cluster
*/
private List<String> getDataNodes() throws IOException {
Configuration conf = new Configuration(false);
conf.addResource(new org.apache.hadoop.fs.Path(HdfsDataContext.getHdfsConfigDirectory(config)));
List<String> datanodesList = new ArrayList<>();
InetSocketAddress namenodeAddress = new InetSocketAddress(
HdfsDataContext.getHdfsNamenodeDefault(config),
HdfsDataContext.getHdfsNamenodePortDefault(config));
DFSClient dfsClient = new DFSClient(namenodeAddress, conf);
ClientProtocol nameNode = dfsClient.getNamenode();
DatanodeInfo[] datanodeReport =
nameNode.getDatanodeReport(HdfsConstants.DatanodeReportType.ALL);
for (DatanodeInfo di : datanodeReport) {
datanodesList.add(di.getHostName());
}
return datanodesList;
}
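For context, here is a minimal standalone sketch of the same getDatanodeReport(ALL) call, assuming Hadoop 2.x client libraries; the NameNode host name and port are placeholders, not values taken from the example above.
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class ListDataNodesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode RPC address; adjust to the target cluster.
    InetSocketAddress nnAddr = new InetSocketAddress("namenode.example.com", 8020);
    try (DFSClient dfsClient = new DFSClient(nnAddr, conf)) {
      ClientProtocol namenode = dfsClient.getNamenode();
      for (DatanodeInfo dn :
          namenode.getDatanodeReport(HdfsConstants.DatanodeReportType.ALL)) {
        System.out.println(dn.getHostName());
      }
    }
  }
}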
Example 2
private static ClientProtocol getNNProxy(
Token<DelegationTokenIdentifier> token, Configuration conf)
throws IOException {
URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME,
token);
if (HAUtil.isTokenForLogicalUri(token) &&
!HAUtil.isLogicalUri(conf, uri)) {
// If the token is for a logical nameservice, but the configuration
// we have disagrees about that, we can't actually renew it.
// This can be the case in MR, for example, if the RM doesn't
// have all of the HA clusters configured in its configuration.
throw new IOException("Unable to map logical nameservice URI '" +
uri + "' to a NameNode. Local configuration does not have " +
"a failover proxy provider configured.");
}
NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
assert info.getDelegationTokenService().equals(token.getService()) :
"Returned service '" + info.getDelegationTokenService().toString() +
"' doesn't match expected service '" +
token.getService().toString() + "'";
return info.getProxy();
}
Example 3
public void testSinglePortStartup() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
NameNode nn = cluster.getNameNode();
InetSocketAddress dnAddress = nn.getNameNodeDNAddress();
InetSocketAddress clientAddress = nn.getNameNodeAddress();
assertEquals(clientAddress, dnAddress);
DatanodeProtocol dnProtocol = (DatanodeProtocol) RPC.waitForProxy(
DatanodeProtocol.class, DatanodeProtocol.versionID, dnAddress, conf);
// perform a dummy call
dnProtocol.getProtocolVersion(DatanodeProtocol.class.getName(),
DatanodeProtocol.versionID);
ClientProtocol client = (ClientProtocol) RPC.waitForProxy(
ClientProtocol.class, ClientProtocol.versionID, dnAddress, conf);
client.getProtocolVersion(ClientProtocol.class.getName(),
ClientProtocol.versionID);
cluster.shutdown();
}
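On newer Hadoop versions the old MiniDFSCluster constructor used above is typically replaced by the builder. A hedged sketch (builder API assumed present on the classpath) that starts a mini cluster, reaches its ClientProtocol proxy, and issues one read-only call:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class MiniClusterClientProtocolSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      // DistributedFileSystem#getClient is a private-audience API; it is used here
      // only to reach the ClientProtocol proxy of the mini cluster's NameNode.
      ClientProtocol namenode = cluster.getFileSystem().getClient().getNamenode();
      System.out.println("blockSize=" + namenode.getServerDefaults().getBlockSize());
    } finally {
      cluster.shutdown();
    }
  }
}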
Example 4
/**
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
*/
protected ClientProtocol createNameNodeProxy() throws IOException {
ServletContext context = getServletContext();
// if we are running in the Name Node, use it directly rather than via
// rpc
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
if (nn != null) {
return nn.getRpcServer();
}
InetSocketAddress nnAddr =
NameNodeHttpServer.getNameNodeAddressFromContext(context);
Configuration conf = new HdfsConfiguration(
NameNodeHttpServer.getConfFromContext(context));
return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
ClientProtocol.class).getProxy();
}
Example 5
/**
* Dumps DFS data structures into specified file.
* Usage: hdfs dfsadmin -metasave filename
* @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurred while accessing
* the file or path.
*/
public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log "
+ "directory of namenode " + proxy.getAddress());
}
} else {
dfs.metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log " +
"directory of namenode " + dfs.getUri());
}
return 0;
}
Example 6
private DFSClient genClientWithDummyHandler() throws IOException {
URI nnUri = dfs.getUri();
FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
NameNodeProxies.createFailoverProxyProvider(conf,
nnUri, ClientProtocol.class, true, null);
InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
failoverProxyProvider, RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
Integer.MAX_VALUE,
DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
failoverProxyProvider.getInterface().getClassLoader(),
new Class[] { ClientProtocol.class }, dummyHandler);
DFSClient client = new DFSClient(null, proxy, conf, null);
return client;
}
Example 7
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs,
DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
LocatedBlock locatedBlock, Encryptor encryptor, List<Channel> datanodeList,
DataChecksum summer, ByteBufAllocator alloc) {
this.conf = conf;
this.dfs = dfs;
this.client = client;
this.namenode = namenode;
this.fileId = fileId;
this.clientName = clientName;
this.src = src;
this.block = locatedBlock.getBlock();
this.locations = locatedBlock.getLocations();
this.encryptor = encryptor;
this.datanodeList = datanodeList;
this.summer = summer;
this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
this.alloc = alloc;
this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
this.state = State.STREAMING;
setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
Example 8
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
CipherSuite suite, CryptoProtocolVersion version) throws Exception {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, new FileEncryptionInfo(suite,
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
(byte) 0))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Example 9
private FileStatus[] versionBasedListPath(String src) throws IOException {
if (namenodeVersion >= ClientProtocol.ITERATIVE_LISTING_VERSION) {
return iterativeListing(src);
} else if (namenodeVersion >= ClientProtocol.OPTIMIZE_FILE_STATUS_VERSION) {
HdfsFileStatus[] hdfsStats = namenode.getHdfsListing(src);
if (hdfsStats == null) {
return null;
}
FileStatus[] stats = new FileStatus[hdfsStats.length];
for (int i=0; i<stats.length; i++) {
stats[i] = toFileStatus(hdfsStats[i], src);
}
return stats;
} else {
return namenode.getListing(src);
}
}
Example 10
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
final UnixUserGroupInformation ugi = getUGI(request);
final PrintWriter out = response.getWriter();
final String filename = getFilename(request, response);
final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
xml.declaration();
final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
UnixUserGroupInformation.saveToConf(conf,
UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
final ClientProtocol nnproxy = DFSClient.createNamenode(conf);
try {
final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
filename, nnproxy, socketFactory, socketTimeout);
MD5MD5CRC32FileChecksum.write(xml, checksum);
} catch(IOException ioe) {
new RemoteException(ioe.getClass().getName(), ioe.getMessage()
).writeXml(filename, xml);
}
xml.endDocument();
}
Example 11
/** Select a datanode to service this request, which is the first one
* in the returned array. The rest of the elements in the array
* are possible candidates if the first one fails.
* Currently, this looks at no more than the first five blocks of a file,
* selecting a datanode randomly from the most represented.
*/
private static DatanodeInfo[] pickSrcDatanode(FileStatus i,
ClientProtocol nnproxy) throws IOException {
// a race condition can happen by initializing a static member this way.
// A proper fix should make JspHelper a singleton. Since it doesn't affect
// correctness, we leave it as is for now.
if (jspHelper == null)
jspHelper = new JspHelper();
final LocatedBlocks blks = nnproxy.getBlockLocations(
i.getPath().toUri().getPath(), 0, 1);
if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
// pick a random datanode
return new DatanodeInfo[] { jspHelper.randomNode() };
}
return jspHelper.bestNode(blks);
}
Example 12
/**
* Create a NameNode proxy for the client if the client and NameNode
* are compatible
*
* @param nameNodeAddr NameNode address
* @param conf configuration
* @param ugi ticket
* @return a NameNode proxy that's compatible with the client
*/
private void createRPCNamenodeIfCompatible(
InetSocketAddress nameNodeAddr,
Configuration conf,
UnixUserGroupInformation ugi) throws IOException {
try {
this.namenodeProtocolProxy = createRPCNamenode(nameNodeAddr, conf, ugi);
this.rpcNamenode = namenodeProtocolProxy.getProxy();
} catch (RPC.VersionMismatch e) {
long clientVersion = e.getClientVersion();
namenodeVersion = e.getServerVersion();
if (clientVersion > namenodeVersion &&
!ProtocolCompatible.isCompatibleClientProtocol(
clientVersion, namenodeVersion)) {
throw new RPC.VersionIncompatible(
ClientProtocol.class.getName(), clientVersion, namenodeVersion);
}
this.rpcNamenode = (ClientProtocol)e.getProxy();
}
}
Example 13
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
return ClientProtocol.versionID;
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
return NamenodeProtocol.versionID;
} else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
return RefreshAuthorizationPolicyProtocol.versionID;
} else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
return RefreshUserMappingsProtocol.versionID;
} else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
return RefreshCallQueueProtocol.versionID;
} else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
return GetUserMappingsProtocol.versionID;
} else if (protocol.equals(TraceAdminProtocol.class.getName())){
return TraceAdminProtocol.versionID;
} else {
throw new IOException("Unknown protocol to name node: " + protocol);
}
}
Example 14
/**
* Creates an explicitly non-HA-enabled proxy object. Most of the time you
* don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
*
* @param conf the configuration object
* @param nnAddr address of the remote NN to connect to
* @param xface the IPC interface which should be created
* @param ugi the user who is making the calls on the proxy object
* @param withRetries certain interfaces have a non-standard retry policy
* @param fallbackToSimpleAuth - set to true or false during this method to
* indicate if a secure client falls back to simple auth
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createNonHAProxy(
Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException {
Text dtService = SecurityUtil.buildTokenService(nnAddr);
T proxy;
if (xface == ClientProtocol.class) {
proxy = (T) createNNProxyWithClientProtocol(nnAddr, conf, ugi,
withRetries, fallbackToSimpleAuth);
} else if (xface == JournalProtocol.class) {
proxy = (T) createNNProxyWithJournalProtocol(nnAddr, conf, ugi);
} else if (xface == NamenodeProtocol.class) {
proxy = (T) createNNProxyWithNamenodeProtocol(nnAddr, conf, ugi,
withRetries);
} else if (xface == GetUserMappingsProtocol.class) {
proxy = (T) createNNProxyWithGetUserMappingsProtocol(nnAddr, conf, ugi);
} else if (xface == RefreshUserMappingsProtocol.class) {
proxy = (T) createNNProxyWithRefreshUserMappingsProtocol(nnAddr, conf, ugi);
} else if (xface == RefreshAuthorizationPolicyProtocol.class) {
proxy = (T) createNNProxyWithRefreshAuthorizationPolicyProtocol(nnAddr,
conf, ugi);
} else if (xface == RefreshCallQueueProtocol.class) {
proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
} else {
String message = "Unsupported protocol found when creating the proxy " +
"connection to NameNode: " +
((xface != null) ? xface.getClass().getName() : "null");
LOG.error(message);
throw new IllegalStateException(message);
}
return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
}
Example 15
@SuppressWarnings("unchecked")
@Override
public long renew(Token<?> token, Configuration conf) throws IOException {
Token<DelegationTokenIdentifier> delToken =
(Token<DelegationTokenIdentifier>) token;
ClientProtocol nn = getNNProxy(delToken, conf);
try {
return nn.renewDelegationToken(delToken);
} catch (RemoteException re) {
throw re.unwrapRemoteException(InvalidToken.class,
AccessControlException.class);
}
}
Example 16
@SuppressWarnings("unchecked")
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException {
Token<DelegationTokenIdentifier> delToken =
(Token<DelegationTokenIdentifier>) token;
LOG.info("Cancelling " +
DelegationTokenIdentifier.stringifyToken(delToken));
ClientProtocol nn = getNNProxy(delToken, conf);
try {
nn.cancelDelegationToken(delToken);
} catch (RemoteException re) {
throw re.unwrapRemoteException(InvalidToken.class,
AccessControlException.class);
}
}
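In client code the renewer and canceller above are normally reached indirectly through Token#renew and Token#cancel. A hedged sketch; the renewer name is a placeholder, and getDelegationToken may return null on an insecure cluster:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

final class DelegationTokenSketch {
  static void renewAndCancel(Configuration conf) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    // "sketch-renewer" is a placeholder renewer principal.
    Token<?> token = fs.getDelegationToken("sketch-renewer");
    long nextExpiry = token.renew(conf);   // dispatches to the HDFS TokenRenewer
    System.out.println("Token valid until " + nextExpiry);
    token.cancel(conf);
  }
}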
Example 17
/**
* @see ClientProtocol#getBlockLocations(String, long, long)
*/
static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
String src, long start, long length)
throws IOException {
try {
return namenode.getBlockLocations(src, start, length);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
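A hedged sketch of consuming the result: it assumes a ClientProtocol proxy named namenode is already available (for example from NameNodeProxies.createProxy) and simply walks the located blocks of a file:
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

final class BlockLocationsSketch {
  static void printBlocks(ClientProtocol namenode, String src) throws IOException {
    LocatedBlocks blocks = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      StringBuilder hosts = new StringBuilder();
      for (DatanodeInfo dn : lb.getLocations()) {
        hosts.append(dn.getHostName()).append(' ');
      }
      System.out.println(lb.getBlock() + " size=" + lb.getBlockSize() + " on " + hosts);
    }
  }
}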
Example 18
private static FileCreator createFileCreator3() throws NoSuchMethodException {
Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class,
String.class, EnumSetWritable.class, boolean.class, short.class, long.class,
CryptoProtocolVersion[].class, String.class);
return (instance, src, masked, clientName, flag, createParent, replication, blockSize,
supportedVersions) -> {
return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag,
createParent, replication, blockSize, supportedVersions, null);
};
}
Example 19
/**
* Test addBlock() name-node RPC is idempotent
*/
public void testIdepotentCallsAddBlock() throws IOException {
ClientProtocol nn = cluster.getNameNode();
FileSystem fs = cluster.getFileSystem();
DFSClient dfsclient = ((DistributedFileSystem) fs).dfs;
String src = "/testNameNodeFingerprintSent1.txt";
// Path f = new Path(src);
DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
(short) 1, 512L);
FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);
for (int i = 0; i < 512; i++) {
a_out.writeBytes("bc");
}
a_out.flush();
LocatedBlocks lb = nn.getBlockLocations(src, 256, 257);
LocatedBlock lb1 = nn.addBlockAndFetchMetaInfo(src, dfsclient.clientName,
null, null, 512L, lb.getLocatedBlocks().get(lb.locatedBlockCount() - 1)
.getBlock());
LocatedBlock lb2 = nn.addBlockAndFetchMetaInfo(src, dfsclient.clientName,
null, null, 512L, lb.getLocatedBlocks().get(lb.locatedBlockCount() - 1)
.getBlock());
TestCase.assertTrue("blocks: " + lb1.getBlock() + " and " + lb2.getBlock(),
lb1.getBlock().equals(lb2.getBlock()));
}
Example 20
DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
long lastReadTxid) throws IOException {
this.traceSampler = traceSampler;
this.namenode = namenode;
this.it = Iterators.emptyIterator();
this.lastReadTxid = lastReadTxid;
}
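This constructor is normally not called directly; a hedged sketch of reaching the same stream via the public HdfsAdmin API (Hadoop 2.7+ signatures assumed, where take() returns an EventBatch):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

final class InotifySketch {
  static void tailOneBatch(Configuration conf) throws Exception {
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    EventBatch batch = stream.take();            // blocks until edits are available
    for (Event event : batch.getEvents()) {
      System.out.println(event.getEventType());
    }
  }
}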
Example 21
public static ProtocolProxy<ClientProtocol> createRPCNamenode(
Configuration conf) throws IOException {
try {
return createRPCNamenode(NameNode.getAddress(conf), conf,
UnixUserGroupInformation.login(conf, true));
} catch (LoginException e) {
throw new IOException(e);
}
}
Example 22
public static LocatedBlock getLastLocatedBlock(
ClientProtocol namenode, String src
) throws IOException {
//get block info for the last block
LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
List<LocatedBlock> blocks = locations.getLocatedBlocks();
DataNode.LOG.info("blocks.size()=" + blocks.size());
assertTrue(blocks.size() > 0);
return blocks.get(blocks.size() - 1);
}
Example 23
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status,
UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
throws IOException {
String scheme = request.getScheme();
final LocatedBlocks blks = nnproxy.getBlockLocations(
status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
final Configuration conf = NameNodeHttpServer.getConfFromContext(
getServletContext());
final DatanodeID host = pickSrcDatanode(blks, status, conf);
final String hostname;
if (host instanceof DatanodeInfo) {
hostname = host.getHostName();
} else {
hostname = host.getIpAddr();
}
int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
.getInfoPort();
String dtParam = "";
if (dt != null) {
dtParam = JspHelper.getDelegationTokenUrlParam(dt);
}
// Add namenode address to the url params
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
getServletContext());
String addr = nn.getNameNodeAddressHostPortString();
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URL(scheme, hostname, port,
"/streamFile" + encodedPath + '?' +
"ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
dtParam + addrParam);
}
Example 24
/** {@inheritDoc} */
@Override
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
ClientProtocol nnproxy, HttpServletRequest request) throws IOException,
URISyntaxException {
return new URI(request.getScheme(), null, request.getServerName(), request
.getServerPort(), "/streamFile", "filename=" + i.getPath() + "&ugi="
+ ugi, null);
}
Example 25
/**
* Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
* call should be made on every NN in an HA nameservice, not just the active.
*
* @param conf configuration
* @param nsId the nameservice to get all of the proxies for.
* @return a list of RPC proxies for each NN in the nameservice.
* @throws IOException in the event of error.
*/
public static List<ClientProtocol> getProxiesForAllNameNodesInNameservice(
Configuration conf, String nsId) throws IOException {
List<ProxyAndInfo<ClientProtocol>> proxies =
getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);
List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>(
proxies.size());
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
namenodes.add(proxy.getProxy());
}
return namenodes;
}
Example 26
private void doTestConnection(URI host, Configuration hadoopConfiguration) throws IOException {
SocketFactory socketFactory = NetUtils.getSocketFactory(hadoopConfiguration, ClientProtocol.class);
Socket socket = socketFactory.createSocket();
socket.setTcpNoDelay(false);
SocketAddress address = new InetSocketAddress(host.getHost(), host.getPort());
try {
NetUtils.connect(socket, address, CONNECTION_TEST_TIMEOUT);
} finally {
try {
socket.close();
} catch (IOException e) {}
}
}
Example 27
/**
* Command to ask the namenode to reread the hosts and excluded hosts
* file.
* Usage: hdfs dfsadmin -refreshNodes
* @exception IOException
*/
public int refreshNodes() throws IOException {
int exitCode = -1;
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
proxy.getProxy().refreshNodes();
System.out.println("Refresh nodes successful for " +
proxy.getAddress());
}
} else {
dfs.refreshNodes();
System.out.println("Refresh nodes successful");
}
exitCode = 0;
return exitCode;
}
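The same refresh can also be triggered programmatically by driving the dfsadmin tool; a minimal hedged sketch:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

final class DfsAdminRefreshNodesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to "hdfs dfsadmin -refreshNodes" on the command line.
    int rc = ToolRunner.run(conf, new DFSAdmin(conf), new String[] {"-refreshNodes"});
    System.exit(rc);
  }
}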
Example 28
/**
* Command to ask the namenode to finalize previously performed upgrade.
* Usage: hdfs dfsadmin -finalizeUpgrade
* @exception IOException
*/
public int finalizeUpgrade() throws IOException {
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
if (isHaAndLogicalUri) {
// In the case of HA and logical URI, run finalizeUpgrade for all
// NNs in this nameservice.
String nsId = dfsUri.getHost();
List<ClientProtocol> namenodes =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
if (!HAUtil.isAtLeastOneActive(namenodes)) {
throw new IOException("Cannot finalize with no NameNode active");
}
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().finalizeUpgrade();
System.out.println("Finalize upgrade successful for " +
proxy.getAddress());
}
} else {
dfs.finalizeUpgrade();
System.out.println("Finalize upgrade successful");
}
return 0;
}