HDFS-3869. Expose non-file journal manager details in web UI. Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1380978 13f79535-47bb-0310-9956-ffa450edef68
parent 8021d9199f
commit 72485f3112
@@ -38,3 +38,5 @@ HDFS-3845. Fixes for edge cases in QJM recovery protocol (todd)
 HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
 
 HDFS-3863. Track last "committed" txid in QJM (todd)
+
+HDFS-3869. Expose non-file journal manager details in web UI (todd)
@@ -140,4 +140,10 @@ public ListenableFuture<Void> acceptRecovery(SegmentStateProto log,
    * after this point, and any in-flight RPCs may throw an exception.
    */
   public void close();
+
+  /**
+   * Append an HTML-formatted report for this logger's status to the provided
+   * StringBuilder. This is displayed on the NN web UI.
+   */
+  public void appendHtmlReport(StringBuilder sb);
 }
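The contract added here is deliberately small: an implementation only appends a short, self-contained HTML snippet describing its own health. A minimal sketch of a conforming report method (the helper name is hypothetical, not part of the patch):

    // Sketch only, assuming the snippet is embedded verbatim in the NN web
    // UI: escape anything that is not already known-safe HTML.
    static void appendStatusCell(StringBuilder sb, String rawStatus) {
      sb.append(org.apache.jasper.compiler.JspUtil.escapeXml(rawStatus));
    }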
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.jasper.compiler.JspUtil;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -190,6 +191,24 @@ String getMajorityString() {
   int size() {
     return loggers.size();
   }
+
+  /**
+   * Append an HTML-formatted status readout on the current
+   * state of the underlying loggers.
+   * @param sb the StringBuilder to append to
+   */
+  void appendHtmlReport(StringBuilder sb) {
+    sb.append("<table class=\"storage\">");
+    sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
+    for (AsyncLogger l : loggers) {
+      sb.append("<tr>");
+      sb.append("<td>" + JspUtil.escapeXml(l.toString()) + "</td>");
+      sb.append("<td>");
+      l.appendHtmlReport(sb);
+      sb.append("</td></tr>\n");
+    }
+    sb.append("</table>");
+  }
 
   /**
    * @return the (mutable) list of loggers, for use in tests to
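Note the asymmetry in each row: the JN column runs l.toString() through JspUtil.escapeXml because the logger name is plain text, while the status column is appended raw because appendHtmlReport is defined to emit HTML. A rough illustration of what the escaping buys (hypothetical input; exact output is JspUtil's):

    // Illustration only: markup characters in a logger name cannot
    // break the table, because the name is escaped first.
    String name = "Channel to journal node <jn1:8485>";
    String cell = JspUtil.escapeXml(name);
    // roughly: "Channel to journal node &lt;jn1:8485&gt;"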
@@ -84,7 +84,12 @@ public class IPCLoggerChannel implements AsyncLogger {
    * The number of bytes of edits data still in the queue.
    */
   private int queuedEditsSizeBytes = 0;
 
+  /**
+   * The highest txid that has been successfully logged on the remote JN.
+   */
+  private long highestAckedTxId = 0;
+
   /**
    * The maximum number of bytes that can be pending in the queue.
    * This keeps the writer from hitting OOME if one of the loggers
@@ -262,6 +267,9 @@ public ListenableFuture<Void> sendEdits(
       public Void call() throws IOException {
         getProxy().journal(createReqInfo(),
             segmentTxId, firstTxnId, numTxns, data);
+        synchronized (IPCLoggerChannel.this) {
+          highestAckedTxId = firstTxnId + numTxns - 1;
+        }
         return null;
       }
     });
@@ -398,4 +406,14 @@ public Void call() throws IOException {
   public String toString() {
     return "Channel to journal node " + addr;
   }
+
+  @Override
+  public synchronized void appendHtmlReport(StringBuilder sb) {
+    sb.append("Written txid ").append(highestAckedTxId);
+    long behind = committedTxId - highestAckedTxId;
+    assert behind >= 0;
+    if (behind > 0) {
+      sb.append(" (" + behind + " behind)");
+    }
+  }
 }
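The per-JN report is the difference between two counters: committedTxId, the highest txid known durable on a quorum, and highestAckedTxId, the highest txid this particular JN has acknowledged. A worked example with made-up values, mirroring the method above:

    // Made-up values: the quorum has committed through txid 5, but this
    // JN has only acked through txid 4.
    long committedTxId = 5;
    long highestAckedTxId = 4;
    StringBuilder sb = new StringBuilder();
    sb.append("Written txid ").append(highestAckedTxId);
    long behind = committedTxId - highestAckedTxId;   // 1
    if (behind > 0) {
      sb.append(" (" + behind + " behind)");
    }
    // sb.toString() == "Written txid 4 (1 behind)"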
@@ -109,4 +109,13 @@ protected void flushAndSync() throws IOException {
       loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
     }
   }
+
+  @Override
+  public String generateHtmlReport() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("Writing segment beginning at txid " + segmentTxId + "<br/>\n");
+    loggers.appendHtmlReport(sb);
+    return sb.toString();
+  }
+
 }
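Taken together, the report is built in three layers: the web UI asks the stream for generateHtmlReport(), QuorumOutputStream prepends the segment line and delegates to AsyncLoggerSet.appendHtmlReport(), and each IPCLoggerChannel fills in its own status cell. For a single healthy JN the assembled output would look roughly like this (illustration only; host and txid are made up):

    // Approximate value of QuorumOutputStream.generateHtmlReport()
    // for one journal node that is fully caught up.
    String report =
        "Writing segment beginning at txid 1<br/>\n"
        + "<table class=\"storage\">"
        + "<thead><tr><td>JN</td><td>Status</td></tr></thead>\n"
        + "<tr><td>Channel to journal node jn1:8485</td>"
        + "<td>Written txid 2</td></tr>\n"
        + "</table>";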
@@ -24,6 +24,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.jasper.compiler.JspUtil;
 
 /**
  * A generic abstract class to support journaling of edits logs into
@@ -132,4 +133,12 @@ long getTotalSyncTime() {
   protected long getNumSync() {
     return numSync;
   }
+
+  /**
+   * @return a short HTML snippet suitable for describing the current
+   * status of the stream
+   */
+  public String generateHtmlReport() {
+    return JspUtil.escapeXml(this.toString());
+  }
 }
@@ -26,6 +26,7 @@
 import java.util.List;
 import java.util.PriorityQueue;
 import java.util.SortedSet;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -147,7 +148,7 @@ JournalManager getManager() {
       return journal;
     }
 
-    private boolean isDisabled() {
+    boolean isDisabled() {
       return disabled;
     }
 
@@ -165,8 +166,12 @@ public boolean isRequired() {
       return required;
     }
   }
 
-  private List<JournalAndStream> journals = Lists.newArrayList();
+  // COW implementation is necessary since some users (eg the web ui) call
+  // getAllJournalStreams() and then iterate. Since this is rarely
+  // mutated, there is no performance concern.
+  private List<JournalAndStream> journals =
+      new CopyOnWriteArrayList<JournalSet.JournalAndStream>();
 
   final int minimumRedundantJournals;
 
   JournalSet(int minimumRedundantResources) {
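The switch to CopyOnWriteArrayList is what makes it safe for the web UI thread to iterate getAllJournalStreams() while the writer thread mutates the set; a plain ArrayList iterator would fail fast instead. A standalone sketch of the hazard and the fix (demo code, not Hadoop's):

    import java.util.*;
    import java.util.concurrent.CopyOnWriteArrayList;

    public class CowDemo {
      public static void main(String[] args) {
        List<String> plain = new ArrayList<String>(Arrays.asList("a", "b"));
        try {
          for (String s : plain) {
            plain.add("c");                  // mutate mid-iteration
          }
        } catch (ConcurrentModificationException e) {
          System.out.println("ArrayList iterator failed fast");
        }

        List<String> cow =
            new CopyOnWriteArrayList<String>(Arrays.asList("a", "b"));
        for (String s : cow) {
          cow.add("c");                      // iterator reads a snapshot
        }
        System.out.println("COW size: " + cow.size());  // 4
      }
    }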
@@ -519,7 +524,6 @@ public void apply(JournalAndStream jas) throws IOException {
     }
   }
 
-  @VisibleForTesting
   List<JournalAndStream> getAllJournalStreams() {
     return journals;
   }
@@ -48,6 +48,7 @@
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
@@ -61,6 +62,8 @@
 import org.apache.hadoop.util.VersionInfo;
 import org.znerd.xmlenc.XMLOutputter;
 
+import com.google.common.base.Preconditions;
+
 class NamenodeJspHelper {
   static String getSafeModeText(FSNamesystem fsn) {
     if (!fsn.isInSafeMode())
@@ -213,6 +216,52 @@ void generateConfReport(JspWriter out, NameNode nn,
 
     out.print("</table></div>\n");
   }
+
+  /**
+   * Generate an HTML report containing the current status of the HDFS
+   * journals.
+   */
+  void generateJournalReport(JspWriter out, NameNode nn,
+      HttpServletRequest request) throws IOException {
+    FSEditLog log = nn.getFSImage().getEditLog();
+    Preconditions.checkArgument(log != null, "no edit log set in %s", nn);
+
+    out.println("<h3> " + nn.getRole() + " Journal Status: </h3>");
+
+    out.println("<b>Current transaction ID:</b> " +
+        nn.getFSImage().getLastAppliedOrWrittenTxId() + "<br/>");
+
+    boolean openForWrite = log.isOpenForWrite();
+
+    out.println("<div class=\"dfstable\">");
+    out.println("<table class=\"storage\" title=\"NameNode Journals\">\n"
+        + "<thead><tr><td><b>Journal Manager</b></td><td><b>State</b></td></tr></thead>");
+    for (JournalAndStream jas : log.getJournals()) {
+      out.print("<tr>");
+      out.print("<td>" + jas.getManager());
+      if (jas.isRequired()) {
+        out.print(" [required]");
+      }
+      out.print("</td><td>");
+
+      if (jas.isDisabled()) {
+        out.print("<span class=\"failed\">Failed</span>");
+      } else if (openForWrite) {
+        EditLogOutputStream elos = jas.getCurrentStream();
+        if (elos != null) {
+          out.println(elos.generateHtmlReport());
+        } else {
+          out.println("not currently writing");
+        }
+      } else {
+        out.println("open for read");
+      }
+      out.println("</td></tr>");
+    }
+
+    out.println("</table></div>");
+  }
+
   void generateHealthReport(JspWriter out, NameNode nn,
       HttpServletRequest request) throws IOException {
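Each journal row ends up in exactly one of three states: Failed (the journal was disabled after an error), a live report from its current output stream, or an idle/read-only note. Restated as a pure function (sketch mirroring the logic above; the helper and its parameters are ad hoc):

    // Sketch: the per-journal state cell as a pure function of the
    // three inputs the JSP helper consults.
    static String stateCell(boolean disabled, boolean openForWrite,
        EditLogOutputStream elos) {
      if (disabled) {
        return "<span class=\"failed\">Failed</span>";
      } else if (openForWrite) {
        return elos != null
            ? elos.generateHtmlReport() : "not currently writing";
      } else {
        return "open for read";
      }
    }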
@@ -60,8 +60,10 @@
 <%= NamenodeJspHelper.getCorruptFilesWarning(fsn)%>
 
 <% healthjsp.generateHealthReport(out, nn, request); %>
 <hr>
+<% healthjsp.generateJournalReport(out, nn, request); %>
+<hr/>
 <% healthjsp.generateConfReport(out, nn, request); %>
 <hr>
 <%
   out.println(ServletUtil.htmlFooter());
 %>
@@ -17,17 +17,18 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.URL;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -185,4 +186,41 @@ public void testMismatchedNNIsRejected() throws Exception {
           "Unable to start log segment 1: too few journals", ioe);
     }
   }
+
+  @Test
+  public void testWebPageHasQjmInfo() throws Exception {
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        mjc.getQuorumJournalURI("myjournal").toString());
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(0)
+      .manageNameDfsDirs(false)
+      .build();
+    try {
+      URL url = new URL("http://localhost:"
+          + NameNode.getHttpAddress(cluster.getConfiguration(0)).getPort()
+          + "/dfshealth.jsp");
+
+      cluster.getFileSystem().mkdirs(TEST_PATH);
+
+      String contents = DFSTestUtil.urlGet(url);
+      assertTrue(contents.contains("Channel to journal node"));
+      assertTrue(contents.contains("Written txid 2"));
+
+      // Stop one JN, do another txn, and make sure it shows as
+      // stuck behind the others.
+      mjc.getJournalNode(0).stopAndJoin(0);
+
+      cluster.getFileSystem().delete(TEST_PATH, true);
+
+      contents = DFSTestUtil.urlGet(url);
+      System.out.println(contents);
+      assertTrue(contents.contains("(1 behind)"));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
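The magic numbers in the assertions follow from edit-log txid accounting; assuming the usual layout, a comment-level trace:

    // Assumed txid trace behind the assertions (illustrative):
    //   txid 1: OP_START_LOG_SEGMENT, written when the NN opens the log
    //   txid 2: OP_MKDIR for TEST_PATH      -> page shows "Written txid 2"
    //   txid 3: OP_DELETE after jn0 stops   -> jn0 stays acked at 2 while
    //           the quorum commits 3, so jn0 renders "(1 behind)"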