HADOOP-7030. Add TableMapping topology implementation to read host to rack mapping from a file. Contributed by Patrick Angeles and tomwhite.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1304597 13f79535-47bb-0310-9956-ffa450edef68
parent f67c2d1bd0
commit 1ff0359aa0
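For orientation, the mapping file read by the new TableMapping class is a plain two-column text file: the first column is a hostname or IP address, the second is the rack it maps to, and blank lines or lines starting with '#' are skipped. A small hypothetical example (host names, addresses, and rack names are made up, not part of the commit):

  # whitespace-separated columns: host-or-IP  rack
  datanode1.example.com   /rack1
  10.1.1.2                /rack2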
hadoop-common-project/hadoop-common/CHANGES.txt
@@ -135,6 +135,9 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8121. Active Directory Group Mapping Service. (Jonathan Natkins via
     atm)
 
+    HADOOP-7030. Add TableMapping topology implementation to read host to rack
+    mapping from a file. (Patrick Angeles and tomwhite via tomwhite)
+
   IMPROVEMENTS
 
     HADOOP-7524. Change RPC to allow multiple protocols including multiple
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -64,6 +64,10 @@ public class CommonConfigurationKeysPublic {
   public static final String NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
     "net.topology.node.switch.mapping.impl";
 
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
+    "net.topology.table.file.name";
+
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String FS_TRASH_CHECKPOINT_INTERVAL_KEY =
     "fs.trash.checkpoint.interval";
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java (new file)
@@ -0,0 +1,147 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

/**
 * <p>
 * Simple {@link DNSToSwitchMapping} implementation that reads a 2 column text
 * file. The columns are separated by whitespace. The first column is a DNS or
 * IP address and the second column specifies the rack where the address maps.
 * </p>
 * <p>
 * This class uses the configuration parameter {@code
 * net.topology.table.file.name} to locate the mapping file.
 * </p>
 * <p>
 * Calls to {@link #resolve(List)} will look up the address as defined in the
 * mapping file. If no entry corresponding to the address is found, the value
 * {@code /default-rack} is returned.
 * </p>
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TableMapping extends CachedDNSToSwitchMapping {

  private static final Log LOG = LogFactory.getLog(TableMapping.class);

  public TableMapping() {
    super(new RawTableMapping());
  }

  private RawTableMapping getRawMapping() {
    return (RawTableMapping) rawMapping;
  }

  @Override
  public Configuration getConf() {
    return getRawMapping().getConf();
  }

  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    getRawMapping().setConf(conf);
  }

  private static final class RawTableMapping extends Configured
      implements DNSToSwitchMapping {

    private final Map<String, String> map = new HashMap<String, String>();
    private boolean initialized = false;

    private synchronized void load() {
      map.clear();

      String filename = getConf().get(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, null);
      if (StringUtils.isBlank(filename)) {
        LOG.warn(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY + " not configured. "
            + NetworkTopology.DEFAULT_RACK + " will be returned.");
        return;
      }

      BufferedReader reader = null;
      try {
        reader = new BufferedReader(new FileReader(filename));
        String line = reader.readLine();
        while (line != null) {
          line = line.trim();
          if (line.length() != 0 && line.charAt(0) != '#') {
            String[] columns = line.split("\\s+");
            if (columns.length == 2) {
              map.put(columns[0], columns[1]);
            } else {
              LOG.warn("Line does not have two columns. Ignoring. " + line);
            }
          }
          line = reader.readLine();
        }
      } catch (Exception e) {
        LOG.warn(filename + " cannot be read. " + NetworkTopology.DEFAULT_RACK
            + " will be returned.", e);
        map.clear();
      } finally {
        if (reader != null) {
          try {
            reader.close();
          } catch (IOException e) {
            LOG.warn(filename + " cannot be read. "
                + NetworkTopology.DEFAULT_RACK + " will be returned.", e);
            map.clear();
          }
        }
      }
    }

    public synchronized List<String> resolve(List<String> names) {
      if (!initialized) {
        initialized = true;
        load();
      }

      List<String> results = new ArrayList<String>(names.size());
      for (String name : names) {
        String result = map.get(name);
        if (result != null) {
          results.add(result);
        } else {
          results.add(NetworkTopology.DEFAULT_RACK);
        }
      }
      return results;
    }

  }
}
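To make the Javadoc above concrete, here is a minimal usage sketch, along the lines of the unit test added below; the file path and host names are assumptions for illustration, not part of the commit:

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.net.TableMapping;

  public class TableMappingExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Point the mapping at a (hypothetical) two-column topology table file.
      conf.set("net.topology.table.file.name", "/etc/hadoop/topology.table");

      TableMapping mapping = new TableMapping();
      mapping.setConf(conf);

      List<String> names = new ArrayList<String>();
      names.add("datanode1.example.com"); // assumed to be listed in the table
      names.add("unknown.example.com");   // not listed, so /default-rack is returned

      List<String> racks = mapping.resolve(names);
      System.out.println(racks);          // e.g. [/rack1, /default-rack], depending on the file
    }
  }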
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -688,6 +688,19 @@
   </description>
 </property>
 
+<property>
+  <name>net.topology.table.file.name</name>
+  <value></value>
+  <description> The file name for a topology file, which is used when the
+    net.topology.node.switch.mapping.impl property is set to
+    org.apache.hadoop.net.TableMapping. The file format is a two column text
+    file, with columns separated by whitespace. The first column is a DNS or
+    IP address and the second column specifies the rack where the address
+    maps. If no entry corresponding to a host in the cluster is found, then
+    /default-rack is assumed.
+  </description>
+</property>
+
 <!-- Local file system -->
 <property>
   <name>file.stream-buffer-size</name>
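As the description notes, the table file only takes effect when TableMapping is selected as the switch-mapping implementation. A sketch of the corresponding core-site.xml overrides (the file path is a made-up example):

  <property>
    <name>net.topology.node.switch.mapping.impl</name>
    <value>org.apache.hadoop.net.TableMapping</value>
  </property>
  <property>
    <name>net.topology.table.file.name</name>
    <value>/etc/hadoop/topology.table</value>
  </property>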
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java (new file)
@@ -0,0 +1,145 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.net;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;

import static org.junit.Assert.assertEquals;

import com.google.common.base.Charsets;
import com.google.common.io.Files;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;

public class TestTableMapping {

  private File mappingFile;

  @Before
  public void setUp() throws IOException {
    mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt");
    Files.write("a.b.c /rack1\n" +
                "1.2.3\t/rack2\n", mappingFile, Charsets.UTF_8);
    mappingFile.deleteOnExit();
  }

  @Test
  public void testResolve() throws IOException {
    TableMapping mapping = new TableMapping();

    Configuration conf = new Configuration();
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mappingFile.getCanonicalPath());
    mapping.setConf(conf);

    List<String> names = new ArrayList<String>();
    names.add("a.b.c");
    names.add("1.2.3");

    List<String> result = mapping.resolve(names);
    assertEquals(names.size(), result.size());
    assertEquals("/rack1", result.get(0));
    assertEquals("/rack2", result.get(1));
  }

  @Test
  public void testTableCaching() throws IOException {
    TableMapping mapping = new TableMapping();

    Configuration conf = new Configuration();
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mappingFile.getCanonicalPath());
    mapping.setConf(conf);

    List<String> names = new ArrayList<String>();
    names.add("a.b.c");
    names.add("1.2.3");

    List<String> result1 = mapping.resolve(names);
    assertEquals(names.size(), result1.size());
    assertEquals("/rack1", result1.get(0));
    assertEquals("/rack2", result1.get(1));

    // point the config at a bad file; the previously loaded table should be
    // cached, so the results must not change
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, "some bad value for a file");

    List<String> result2 = mapping.resolve(names);
    assertEquals(result1, result2);
  }

  @Test
  public void testNoFile() {
    TableMapping mapping = new TableMapping();

    Configuration conf = new Configuration();
    mapping.setConf(conf);

    List<String> names = new ArrayList<String>();
    names.add("a.b.c");
    names.add("1.2.3");

    List<String> result = mapping.resolve(names);
    assertEquals(names.size(), result.size());
    assertEquals(NetworkTopology.DEFAULT_RACK, result.get(0));
    assertEquals(NetworkTopology.DEFAULT_RACK, result.get(1));
  }

  @Test
  public void testFileDoesNotExist() {
    TableMapping mapping = new TableMapping();

    Configuration conf = new Configuration();
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, "/this/file/does/not/exist");
    mapping.setConf(conf);

    List<String> names = new ArrayList<String>();
    names.add("a.b.c");
    names.add("1.2.3");

    List<String> result = mapping.resolve(names);
    assertEquals(names.size(), result.size());
    assertEquals(result.get(0), NetworkTopology.DEFAULT_RACK);
    assertEquals(result.get(1), NetworkTopology.DEFAULT_RACK);
  }

  @Test
  public void testBadFile() throws IOException {
    Files.write("bad contents", mappingFile, Charsets.UTF_8);

    TableMapping mapping = new TableMapping();

    Configuration conf = new Configuration();
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mappingFile.getCanonicalPath());
    mapping.setConf(conf);

    List<String> names = new ArrayList<String>();
    names.add("a.b.c");
    names.add("1.2.3");

    List<String> result = mapping.resolve(names);
    assertEquals(names.size(), result.size());
    assertEquals(result.get(0), NetworkTopology.DEFAULT_RACK);
    assertEquals(result.get(1), NetworkTopology.DEFAULT_RACK);
  }

}